"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_snake_case = 25_0004
_snake_case = 25_0020
@require_sentencepiece
@require_tokenizers
class _a ( UpperCAmelCase__ , unittest.TestCase ):
a_ : List[Any] = MBartaaTokenizer
a_ : List[str] = MBartaaTokenizerFast
a_ : int = True
a_ : str = True
def _UpperCamelCase ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ = MBartaaTokenizer(__lowerCAmelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = '<s>'
lowerCamelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__lowerCAmelCase ) , 10_54 )
def _UpperCamelCase ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = MBartaaTokenizer(__lowerCAmelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__lowerCAmelCase )
lowerCamelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _UpperCamelCase ( self : Dict ):
        expected_encoding = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that"
        ' "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the'
        " violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
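
# A minimal usage sketch of the translation setup these tests exercise (illustrative,
# not part of the test suite; assumes network access to the Hugging Face Hub):
#
#     from transformers import MBart50Tokenizer
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#     # input_ids begin with the source language code (EN_CODE, 250004) and end with EOS (2),
#     # which is exactly what `expected_src_tokens` above asserts.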
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
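
# The `_LazyModule` indirection above defers the heavy framework imports until an
# attribute is actually accessed. A usage sketch (illustrative):
#
#     from transformers import BlenderbotConfig  # only now is configuration_blenderbot imported
#     config = BlenderbotConfig()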
"""Longformer model configuration, plus its ONNX export configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
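
# A minimal export-configuration sketch using the classes defined above (illustrative;
# the variable names are made up):
#
#     config = LongformerConfig()
#     onnx_config = LongformerOnnxConfig(config)
#     print(list(onnx_config.inputs))        # ['input_ids', 'attention_mask', 'global_attention_mask']
#     print(onnx_config.default_onnx_opset)  # 14, needed for the tril operator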
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
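
# A minimal instantiation sketch (illustrative). The derived attributes follow from
# the constructor above: num_layers == len(depths) and
# hidden_size == embed_dim * 2 ** (len(depths) - 1).
#
#     from transformers import Swinv2Config
#
#     config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#     assert config.num_layers == 4
#     assert config.hidden_size == 768  # 96 * 2**3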
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_snake_case = logging.get_logger(__name__)
_snake_case = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class _a ( UpperCamelCase_ ):
a_ : List[Any] = 'layoutlmv3'
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_02_65 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Any=5_12 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[str]=10_24 , SCREAMING_SNAKE_CASE__ : str=1_28 , SCREAMING_SNAKE_CASE__ : str=1_28 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : Any=1_28 , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : Dict=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=2_24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase__ = max_ad_position_embeddings
lowerCamelCase__ = coordinate_size
lowerCamelCase__ = shape_size
lowerCamelCase__ = has_relative_attention_bias
lowerCamelCase__ = rel_pos_bins
lowerCamelCase__ = max_rel_pos
lowerCamelCase__ = has_spatial_attention_bias
lowerCamelCase__ = rel_ad_pos_bins
lowerCamelCase__ = max_rel_ad_pos
lowerCamelCase__ = text_embed
lowerCamelCase__ = visual_embed
lowerCamelCase__ = input_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = classifier_dropout
class _a ( UpperCamelCase_ ):
a_ : str = version.parse('1.12' )
@property
def _UpperCamelCase ( self : str ):
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def _UpperCamelCase ( self : int ):
return 1e-5
@property
def _UpperCamelCase ( self : Union[str, Any] ):
return 12
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : "ProcessorMixin" , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , ):
setattr(processor.image_processor , 'apply_ocr' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
lowerCamelCase__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowerCamelCase__ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowerCamelCase__ = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
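
# A dummy-input sketch for the ONNX config above (illustrative; assumes a LayoutLMv3
# processor checkpoint is reachable via the Hub):
#
#     from transformers import LayoutLMv3Processor
#     from transformers.utils import TensorType
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="default")
#     dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#     print(dummy.keys())  # input_ids, attention_mask, bbox, pixel_values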
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
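
# A quick sanity check of the helpers above (added for illustration; safe to delete):
if __name__ == "__main__":
    assert factorial(10) == 3628800
    assert split_and_add(3628800) == 27  # 3 + 6 + 2 + 8 + 8 + 0 + 0
    assert solution(10) == 27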
"""simple docstring"""
import numpy as np
class _a :
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if red is not None:
lowerCamelCase__ = red
if green is not None:
lowerCamelCase__ = green
if blue is not None:
lowerCamelCase__ = blue
if red_edge is not None:
lowerCamelCase__ = red_edge
if nir is not None:
lowerCamelCase__ = nir
return True
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict="" , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None ):
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
lowerCamelCase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def _UpperCamelCase ( self : Any ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _UpperCamelCase ( self : str ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _UpperCamelCase ( self : Optional[Any] ):
return self.nir * (self.red / (self.green**2))
def _UpperCamelCase ( self : Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _UpperCamelCase ( self : Any ):
return (self.nir - self.red) / (self.nir + self.red)
def _UpperCamelCase ( self : List[Any] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def _UpperCamelCase ( self : Optional[Any] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _UpperCamelCase ( self : str ):
return (self.nir - self.green) / (self.nir + self.green)
def _UpperCamelCase ( self : Dict ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _UpperCamelCase ( self : Tuple ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _UpperCamelCase ( self : Dict ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _UpperCamelCase ( self : List[str] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.08 , SCREAMING_SNAKE_CASE__ : List[str]=1.22 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _UpperCamelCase ( self : List[Any] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _UpperCamelCase ( self : List[str] ):
return (self.nir / self.green) - 1
def _UpperCamelCase ( self : int ):
return (self.nir / self.redEdge) - 1
def _UpperCamelCase ( self : Union[str, Any] ):
return (self.red - self.blue) / self.red
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _UpperCamelCase ( self : List[Any] ):
return self.nir - self.green
def _UpperCamelCase ( self : List[Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _UpperCamelCase ( self : str ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
return (self.nir - b) / (a * self.red)
def _UpperCamelCase ( self : Optional[Any] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _UpperCamelCase ( self : str ):
return (self.red + self.green + self.blue) / 30.5
def _UpperCamelCase ( self : Tuple ):
return self.nir / self.red
def _UpperCamelCase ( self : List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def _UpperCamelCase ( self : Union[str, Any] ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _UpperCamelCase ( self : int ):
return self.green / (self.nir + self.red + self.green)
def _UpperCamelCase ( self : Optional[Any] ):
return self.nir / (self.nir + self.red + self.green)
def _UpperCamelCase ( self : Optional[int] ):
return self.red / (self.nir + self.red + self.green)
def _UpperCamelCase ( self : List[str] ):
return (self.green - self.red) / (self.green + self.red)
def _UpperCamelCase ( self : Tuple ):
return (self.red - self.green) / (self.red + self.green)
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _UpperCamelCase ( self : List[str] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _UpperCamelCase ( self : str ):
return self.nir / self.red
def _UpperCamelCase ( self : List[str] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def _UpperCamelCase ( self : Tuple ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
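
# A minimal usage sketch (illustrative; the band values are made up). Each band is a
# numpy array of reflectances, and `calculation` dispatches on the index names
# registered in the `funcs` dict above.
if __name__ == "__main__":
    red = np.array([[0.2, 0.3], [0.4, 0.5]])
    nir = np.array([[0.6, 0.7], [0.8, 0.9]])
    ic = IndexCalculation(red=red, nir=nir)
    print(ic.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)
    print(ic.ndvi())               # same result, called directly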
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
def __init__( self : List[Any] , *,
SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
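
# A minimal end-to-end sketch (illustrative; assumes `datasets`, `datasketch` and
# `dpu_utils` are installed). Rows need "content", "repo_name" and "path" columns,
# which is what `_compute_min_hash` reads above.
if __name__ == "__main__":
    ds = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b\n" * 5] * 2 + ["print('hello world')\n" * 5],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    # the two identical files collapse into a single cluster, with one "extreme" kept
    print(len(ds_dedup), clusters)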
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
def snake_case ( _a: List[str] )-> int:
'''simple docstring'''
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
lowerCamelCase__ , lowerCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
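
# A small worked example of the quadratic double loop above (added for illustration):
if __name__ == "__main__":
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([-2, 0, -1]) == [-2, -1, 0]
    assert exchange_sort([]) == []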
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
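# A minimal sketch of the classifier-free guidance arithmetic used in the denoising
# loop above; the tensors are toy values for illustration, not real model outputs:
#
#     noise_pred_uncond = torch.tensor([0.1, 0.2])
#     noise_pred_cond = torch.tensor([0.3, 0.1])
#     guidance_scale = 4.0
#     guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
#     # guided -> tensor([ 0.9000, -0.2000])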
| 659 | 0 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array: z[i] is the length of the longest substring starting
    at i that is also a prefix of ``input_str``."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result extends the right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the prefix match at position ``i`` can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
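# Hand-worked examples (computed by the editor, not part of the source):
# z_function("abracadabra") -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# find_pattern("abr", "abracadabra") -> 2   # matches start at indices 0 and 7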
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
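# A minimal usage sketch (the checkpoint name is real, but the sentences are
# illustrative and downloading the pretrained files is assumed to work):
# tokenizer = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")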
| 659 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def snake_case ( _a: Tuple , _a: int , _a: Optional[int] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__ = JsonDatasetReader(_a , cache_dir=_a , keep_in_memory=_a ).read()
_check_json_dataset(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def snake_case ( _a: List[Any] , _a: int , _a: Dict )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
_check_json_dataset(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: Any )-> int:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
assert isinstance(_a , _a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case ( _a: Any , _a: Optional[int] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
lowerCamelCase__ = features.copy()
lowerCamelCase__ = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
assert isinstance(_a , _a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def snake_case ( _a: List[Any] , _a: Optional[Any] , _a: Union[str, Any] )-> int:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ = JsonDatasetReader(_a , cache_dir=_a , split=_a ).read()
_check_json_dataset(_a , _a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def snake_case ( _a: Any , _a: List[str] , _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
if issubclass(_a , _a ):
lowerCamelCase__ = jsonl_path
elif issubclass(_a , _a ):
lowerCamelCase__ = [jsonl_path]
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ = JsonDatasetReader(_a , cache_dir=_a ).read()
_check_json_dataset(_a , _a )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
for split in splits:
lowerCamelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def snake_case ( _a: Dict , _a: int , _a: List[str] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__ = JsonDatasetReader({'train': jsonl_path} , cache_dir=_a , keep_in_memory=_a ).read()
_check_json_datasetdict(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def snake_case ( _a: int , _a: Any , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ = features.copy() if features else default_expected_features
lowerCamelCase__ = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ = JsonDatasetReader({'train': jsonl_path} , features=_a , cache_dir=_a ).read()
_check_json_datasetdict(_a , _a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def snake_case ( _a: int , _a: Optional[int] , _a: int )-> Dict:
'''simple docstring'''
if split:
lowerCamelCase__ = {split: jsonl_path}
else:
lowerCamelCase__ = 'train'
lowerCamelCase__ = {'train': jsonl_path, 'test': jsonl_path}
lowerCamelCase__ = tmp_path / 'cache'
lowerCamelCase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ = JsonDatasetReader(_a , cache_dir=_a ).read()
_check_json_datasetdict(_a , _a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class _a :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
lowerCamelCase__ = load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = tmp_path_factory.mktemp('data' ) / F'test.json.{extension}'
lowerCamelCase__ = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
lowerCamelCase__ = f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
lowerCamelCase__ = f.read()
assert exported_content == original_content
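# A minimal round-trip sketch of the writer/reader pair exercised above
# (the toy dataset content is the editor's illustration):
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
# with io.BytesIO() as buffer:
#     JsonDatasetWriter(ds, buffer, lines=True).write()
#     buffer.seek(0)
#     print(buffer.read().decode("utf-8"))  # one JSON object per line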
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
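# To run just this module (the file path is assumed from the transformers layout):
# python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -q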
| 659 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
_snake_case = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : str , ):
lowerCamelCase__ = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : str ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
lowerCamelCase__ = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "en_XX" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "ro_RO" , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase ( self : Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : Union[str, Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(lowerCamelCase__ )
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
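# A minimal usage sketch (the sentence pair is the canonical en-ro example from the
# transformers docs; downloading the pretrained files is assumed to work):
# tokenizer = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tokenizer(
#     "UN Chief Says There Is No Military Solution in Syria",
#     text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
#     return_tensors="pt",
# )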
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class _a ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
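# These tests are gated on the TEST_SAGEMAKER environment variable (see the skipif
# decorator above). An illustrative invocation from a transformers checkout:
# TEST_SAGEMAKER=True python -m pytest tests/sagemaker -q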
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
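# Funnel gives the leading <cls> its own token type id (cls_token_type_id = 2) instead
# of the BERT-style 0. A hand-worked sketch for a single sequence of n tokens
# (illustrative, following create_token_type_ids_from_sequences above):
# token_type_ids = [2] + [0] * (n + 1)   # <cls>, then n tokens plus the trailing <sep>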
| 659 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
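# A minimal sketch of aligning the template with a dataset's features
# (the feature values are the editor's illustration):
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification().align_with_features(features)
# task.label_schema["labels"]  # -> ClassLabel(names=['neg', 'pos'])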
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Check whether ``graph`` (an adjacency list) is bipartite via DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
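# A second hand-checked case: an odd cycle can never be two-colored.
# check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) -> False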
| 659 | 0 |
"""simple docstring"""
def xnor_gate(input_a: int, input_b: int) -> int:
    """Return 1 if both inputs are equal (XNOR truth table), otherwise 0."""
    return 1 if input_a == input_b else 0


def test_xnor_gate() -> None:
    """Check the full XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing; deleted slots are
    marked with a tombstone so probe chains stay intact."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(F'{item.key}: {item.val}' for item in self._buckets if item)
        return F'HashMap({val_string})'
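

# Minimal usage sketch (illustrative, not part of the original module).
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap(initial_block_size=8)
    hm["a"] = 1
    hm["b"] = 2
    hm["a"] = 10  # overwriting keeps the length at 2
    del hm["b"]  # the slot is tombstoned via the _deleted sentinel
    print(len(hm), hm["a"])  # 1 10
    print("b" in hm)  # False (membership test provided by MutableMapping)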
| 659 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
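

# Layout note (illustrative): for a single sequence, build_inputs_with_special_tokens
# produces `<s> A </s>`; for a pair it produces `<s> A </s> </s> B </s>`, and
# create_token_type_ids_from_sequences returns zeros for every position in both cases.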
| 700 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` that sum to `target`,
    using plain recursion (exponential time)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with top-down memoization over the target value."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up in O(n * target)."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
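    # Cross-check (illustrative): the three strategies agree; for target 5 over
    # {1, 2, 5} there are nine ordered combinations.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )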
| 659 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'If set, {key} must be yes or no.')
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Decorator marking a test that requires regex."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Decorator marking a test that requires elasticsearch."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Decorator marking a test that requires sqlalchemy."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Decorator marking a test that requires PyTorch."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Decorator marking a test that requires TensorFlow."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Decorator marking a test that requires JAX."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Decorator marking a test that requires Pillow."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Decorator marking a test that requires transformers."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Decorator marking a test that requires tiktoken."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Decorator marking a test that requires spacy."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator marking a test that requires a specific spacy model."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Decorator marking a test that requires pyspark."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Decorator marking a test that requires joblibspark."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Decorator marking a test as local; skipped unless RUN_LOCAL=1."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Decorator marking a test as packaged; skipped unless RUN_PACKAGED=1."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Decorator marking a test that requires network access; skipped unless RUN_REMOTE=1."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Apply each of the given decorators to every test method of a class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # `session` is the Session instance: the patched attribute is looked up
        # on the class, so the replacement receives it as the first argument.
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", F'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            F'The combined stderr from workers follows:\n{stderr}'
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'\'{cmd_str}\' produced no output.')

    return result


def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ("gw0" -> 0),
    or 0 when the tests are not distributed."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker, so concurrent
    torch.distributed runs do not collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
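

# Quick illustration (not part of the original module): outside a pytest-xdist
# run, PYTEST_XDIST_WORKER is unset, so the helpers fall back to worker "gw0".
if __name__ == "__main__":
    print(pytest_xdist_worker_id())  # 0
    print(get_torch_dist_unique_port())  # 29500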
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the row-oriented source data into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Min-max normalize every attribute column; weight 0 inverts the score
    (smaller raw values are better), weight 1 keeps it (larger is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each source row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Score every row of `source_data` by weighted percentual proximity and
    append the combined score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 702 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the row-oriented source data into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Min-max normalize every attribute column; weight 0 inverts the score
    (smaller raw values are better), weight 1 keeps it (larger is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each source row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Score every row of `source_data` by weighted percentual proximity and
    append the combined score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
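

# Worked example (illustrative, not from the original module): three vehicles
# scored on price and mileage (lower is better, weight 0) and comfort
# (higher is better, weight 1); each row gains its combined score.
if __name__ == "__main__":
    vehicles = [[20, 60, 2.0], [23, 90, 1.5], [22, 50, 2.8]]
    print(procentual_proximity(vehicles, [0, 0, 1]))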
| 659 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit squares
    and blocks of length at least three (Project Euler problem 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
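    # Known value from the problem statement (illustrative check): a row of
    # length seven can be filled in exactly seventeen ways.
    assert solution(7) == 17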
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho algorithm,
    or None if no factor is found within the given number of attempts."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
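

# Quick illustrative check (classic textbook example): with the default
# parameters, Pollard's rho finds a nontrivial factor of 8051 = 83 * 97.
assert pollard_rho(8051) in {83, 97}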
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"""{args.num} is probably prime""")
    else:
        quotient = args.num // divisor
        print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsers based on their arguments."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--bar' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--baz' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--flag' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='?' )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
(lowerCamelCase__ ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase__ )
expected.add_argument('--baz' , default='toto' , type=lowercase__ , help='help message' )
self.argparsersEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='?' )
expected.add_argument('--baz' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase__ , dest='baz' )
expected.add_argument('--opt' , type=lowercase__ , default=lowercase__ )
lowerCamelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
lowerCamelCase__ = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
lowerCamelCase__ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase__ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
lowerCamelCase__ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _UpperCamelCase ( self : Any ):
@dataclass
class _a :
a_ : Literal["titi", "toto", 42] = "toto"
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCamelCase__ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--bar' , default=lowercase__ , type=lowercase__ , help='help message' )
expected.add_argument('--baz' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase__ )
lowerCamelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
lowerCamelCase__ = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
lowerCamelCase__ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--required_str' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
expected.add_argument('--opt' , type=lowercase__ , default=lowercase__ )
expected.add_argument('--baz' , default='toto' , type=lowercase__ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowerCamelCase__ = parser.parse_dict(lowercase__ )[0]
lowerCamelCase__ = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = os.path.join(lowercase__ , 'temp_json' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
lowerCamelCase__ = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
lowerCamelCase__ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = os.path.join(lowercase__ , 'temp_yaml' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(lowercase__ , lowercase__ )
lowerCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowerCamelCase__ = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace filter kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed
    `n` (Project Euler problem 2)."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
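    # Illustrative check: the even-valued Fibonacci terms not exceeding 100
    # are 2, 8 and 34, which sum to 44.
    assert solution(100) == 44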
| 659 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 707 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: Optional[int] , _a: int )-> tuple[float, float]:
'''simple docstring'''
if not len(__UpperCamelCase ) == len(__UpperCamelCase ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = equationa
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = equationa
# Calculate the determinants of the matrices
lowerCamelCase__ = aa * ba - aa * ba
lowerCamelCase__ = ca * ba - ca * ba
lowerCamelCase__ = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
lowerCamelCase__ = determinant_x / determinant
lowerCamelCase__ = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: List[Any] )-> Dict:
'''simple docstring'''
lowerCamelCase__ = re.split('(%s)' % '|'.join(_a ) , _a )
# last string should be ""
return "".join(string_list[:-2] )
def snake_case ( _a: List[Any] , _a: Optional[int] , _a: str , _a: Union[str, Any] , _a: Dict , _a: Optional[int]=20 , **_a: Optional[int] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = defaultdict(_a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a_ : bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a_ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a_ : bool = field(
default=_lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _UpperCamelCase ( self : List[str] ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class _a :
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a_ : Optional[str] = field(default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
a_ : Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
a_ : bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
a_ : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
a_ : Optional[int] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
a_ : Optional[int] = field(
default=_lowerCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
a_ : float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
a_ : bool = field(
default=_lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def _UpperCamelCase ( self : str ):
if self.train_file is not None:
lowerCamelCase__ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase__ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def snake_case ( _a: Any , _a: Tuple )-> Optional[Any]:
'''simple docstring'''
with open(_a , 'r' , encoding='utf-8' ) as f:
lowerCamelCase__ = [json.loads(_a ) for line in f.read().splitlines() if (len(_a ) > 0 and not line.isspace())]
assert len(_a ) == len(_a )
lowerCamelCase__ = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase__ = refs
return Dataset.from_dict(_a )
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _a )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
lowerCamelCase__ = {}
if data_args.train_file is not None:
lowerCamelCase__ = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase__ = data_args.validation_file
lowerCamelCase__ = data_args.train_file.split('.' )[-1]
if extension == "txt":
lowerCamelCase__ = 'text'
lowerCamelCase__ = load_dataset(_a , data_files=_a )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase__ = AutoConfig.from_pretrained(model_args.config_name , **_a )
elif model_args.model_name_or_path:
lowerCamelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **_a )
else:
lowerCamelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
lowerCamelCase__ = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_a )
elif model_args.model_name_or_path:
lowerCamelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_a )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
lowerCamelCase__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowerCamelCase__ = AutoModelForMaskedLM.from_config(_a )
model.resize_token_embeddings(len(_a ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase__ = datasets['train'].column_names
else:
lowerCamelCase__ = datasets['validation'].column_names
lowerCamelCase__ = 'text' if 'text' in column_names else column_names[0]
lowerCamelCase__ = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_a: Optional[int] ):
# Remove empty lines
lowerCamelCase__ = [line for line in examples['text'] if len(_a ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_a , truncation=_a , max_length=data_args.max_seq_length )
lowerCamelCase__ = datasets.map(
_a , batched=_a , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase__ = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCamelCase__ = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase__ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase__ = DataCollatorForWholeWordMask(tokenizer=_a , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCamelCase__ = Trainer(
model=_a , args=_a , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCamelCase__ = model_args.model_name_or_path
else:
lowerCamelCase__ = None
lowerCamelCase__ = trainer.train(resume_from_checkpoint=_a )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase__ = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_a , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
lowerCamelCase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCamelCase__ = trainer.evaluate()
lowerCamelCase__ = math.exp(eval_output['eval_loss'] )
lowerCamelCase__ = perplexity
lowerCamelCase__ = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def snake_case ( _a: Optional[int] )-> Optional[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_a , help='where to store parsed gold_data_path file' , )
lowerCamelCase__ = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowerCamelCase__ = json.load(_a )
for dpr_record in tqdm(_a ):
lowerCamelCase__ = dpr_record['question']
lowerCamelCase__ = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_a ) + '\n' )
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
_snake_case = "src/transformers"
# Matches is_xxx_available()
_snake_case = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_snake_case = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_snake_case = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_snake_case = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_snake_case = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_snake_case = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_snake_case = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_snake_case = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_snake_case = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_snake_case = re.compile(R"^\s*try:")
# Catches a line with else:
_snake_case = re.compile(R"^\s*else:")
def snake_case ( _a: Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
if _re_test_backend.search(snake_case_ ) is None:
return None
lowerCamelCase__ = [b[0] for b in _re_backend.findall(snake_case_ )]
backends.sort()
return "_and_".join(snake_case_ )
def snake_case ( _a: Optional[int] )-> Dict:
'''simple docstring'''
with open(snake_case_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = 0
while line_index < len(snake_case_ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCamelCase__ = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowerCamelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case_ ):
lowerCamelCase__ = _re_one_line_import_struct.search(snake_case_ ).groups()[0]
lowerCamelCase__ = re.findall(R'\[([^\]]+)\]' , snake_case_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowerCamelCase__ = _re_import_struct_key_value.search(snake_case_ )
if single_line_import_search is not None:
lowerCamelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif line.startswith(' ' * 8 + '\"' ):
objects.append(line[9:-3] )
line_index += 1
lowerCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowerCamelCase__ = lines[line_index]
if _re_import_struct_add_one.search(snake_case_ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case_ ) is not None:
lowerCamelCase__ = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(', ' )
lowerCamelCase__ = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_between_brackets.search(snake_case_ ) is not None:
lowerCamelCase__ = _re_between_brackets.search(snake_case_ ).groups()[0].split(', ' )
lowerCamelCase__ = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_quote_object.search(snake_case_ ) is not None:
objects.append(_re_quote_object.search(snake_case_ ).groups()[0] )
elif line.startswith(' ' * 8 + '\"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '\"' ):
objects.append(line[13:-3] )
line_index += 1
lowerCamelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCamelCase__ = []
while (
line_index < len(snake_case_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowerCamelCase__ = lines[line_index]
lowerCamelCase__ = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCamelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case ( _a: Tuple , _a: Optional[int] )-> Optional[Any]:
'''simple docstring'''
def find_duplicates(_a: List[str] ):
return [k for k, v in collections.Counter(snake_case_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCamelCase__ = []
for key in import_dict_objects.keys():
lowerCamelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowerCamelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCamelCase__ = """base imports""" if key == """none""" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
lowerCamelCase__ = os.path.join(snake_case_ , '__init__.py' )
lowerCamelCase__ = parse_init(snake_case_ )
if objects is not None:
lowerCamelCase__ = analyze_results(*snake_case_ )
if len(snake_case_ ) > 0:
lowerCamelCase__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(snake_case_ ) )
if len(snake_case_ ) > 0:
raise ValueError('\n\n'.join(snake_case_ ) )
def snake_case ( )-> str:
'''simple docstring'''
lowerCamelCase__ = []
for path, directories, files in os.walk(snake_case_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(snake_case_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case_ ) / folder).glob('*.py' ) ) ) == 0:
continue
lowerCamelCase__ = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) )
lowerCamelCase__ = short_path.replace(os.path.sep , '.' )
submodules.append(snake_case_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCamelCase__ = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) )
lowerCamelCase__ = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(snake_case_ )
return submodules
_snake_case = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
from transformers.utils import direct_transformers_import
lowerCamelCase__ = direct_transformers_import(snake_case_ )
lowerCamelCase__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(snake_case_ , '__init__.py' ) , 'r' ) as f:
lowerCamelCase__ = f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , snake_case_ ) ) )
lowerCamelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(snake_case_ ) > 0:
lowerCamelCase__ = """\n""".join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registed in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
a_ : Dict = None
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = os.path.join(UpperCamelCase__ , 'feat_extract.json' )
feat_extract_first.to_json_file(UpperCamelCase__ )
lowerCamelCase__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
lowerCamelCase__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
"""simple docstring"""
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case ( _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 0
while number > 0:
lowerCamelCase__ = number % 10
sum_of_digits += last_digit
lowerCamelCase__ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case ( _a: int = 100 )-> int:
'''simple docstring'''
lowerCamelCase__ = factorial(_a )
lowerCamelCase__ = split_and_add(_a )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def snake_case ( _a: Tuple , _a: List[Any] , _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = state_dict.pop(__snake_case )
lowerCamelCase__ = val
def snake_case ( _a: Optional[int] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase__ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
lowerCamelCase__ = value
else:
lowerCamelCase__ = value
return new_state_dict
def snake_case ( _a: Optional[Any] , _a: Optional[int]=False )-> str:
'''simple docstring'''
lowerCamelCase__ = ""
if is_panoptic:
lowerCamelCase__ = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
lowerCamelCase__ = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[:256, :]
lowerCamelCase__ = in_proj_bias[:256]
lowerCamelCase__ = in_proj_weight[256:512, :]
lowerCamelCase__ = in_proj_bias[256:512]
lowerCamelCase__ = in_proj_weight[-256:, :]
lowerCamelCase__ = in_proj_bias[-256:]
def snake_case ( )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__ = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak the original checkpoint's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet; snippets shorter than MIN_NUM_TOKENS are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet into its set of alphanumeric tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
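# Worked example: get_tokens("def foo(x): return x + 1") yields the set
# {"def", "foo", "x", "return", "1"}, since NON_ALPHA splits on every character
# that is not a letter, digit or underscore.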
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and attach it to an existing duplicate cluster if one is close enough."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    """Compute the MinHash of every snippet, then group near-duplicates with a DuplicationIndex."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
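# Worked example: jaccard_similarity("a b c", "b c d") == 2 / 4 == 0.5, because the
# two snippets share the tokens {"b", "c"} out of {"a", "b", "c", "d"} overall.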
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to a set of "extremes" such that every file in the cluster is
    within `jaccard_threshold` similarity of at least one extreme."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared in a process pool, sharing the dataset through a
    module-level global so it is not pickled for every task."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset; return the filtered dataset and the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
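# Minimal usage sketch (assumes a `datasets.Dataset` with "content", "repo_name" and
# "path" columns, as expected by _compute_min_hash; the dataset name is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)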
| 659 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
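# Minimal usage sketch: the defaults mirror a small Dinat variant (see the
# dinat-mini checkpoint referenced above), and `attribute_map` lets the generic
# config names resolve to the Dinat-specific ones.
#
#   config = DinatConfig()
#   assert config.num_hidden_layers == len(config.depths)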
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
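# Note: this script is meant to run inside the scheduled doc-test GitHub Actions
# workflow; it requires CI_SLACK_BOT_TOKEN, CI_SLACK_CHANNEL_ID_DAILY and
# GITHUB_RUN_ID to be present in the environment.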
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
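# Worked example: handle_test_results("10 failed, 90 passed in 1:02:03") returns
# (10, 90, "1:02:03") by reading the number that precedes each keyword.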
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str) -> Dict:
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 659 | 0 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the largest fraction strictly below numerator/denominator
    whose denominator does not exceed `limit` (Project Euler problem 71)."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
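# Worked example: with limit=8 the candidates closest to (but below) 3/7 are 2/5 and
# 3/8; since 2/5 = 0.4 > 3/8 = 0.375, solution(3, 7, 8) returns 2.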
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(
        self,
        image,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
    ):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
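    # Classifier-free guidance note: because the all-zero "unconditional" embedding is
    # batched in front of the real image embedding above, a single prior forward pass
    # produces both predictions, which __call__ later recombines as
    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)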
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 659 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
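# EN_CODE and PYTHON_CODE are the fairseq ids of the "__en_XX__" and "__python__"
# language-code tokens in the base PLBart vocabulary (the integration test below
# asserts "__python__" -> 50002 and "__en_XX__" -> 50003).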
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='base' , keep_accents=__SCREAMING_SNAKE_CASE )
lowerCamelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
lowerCamelCase__ = tokenizer.vocab_size
lowerCamelCase__ = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) for x in range(end - 4 , __SCREAMING_SNAKE_CASE )]
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['__java__', '__python__', '__en_XX__', '<mask>'] )
lowerCamelCase__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
lowerCamelCase__ = tokenizer(__SCREAMING_SNAKE_CASE ).input_ids
self.assertEqual(
tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='multi' , keep_accents=__SCREAMING_SNAKE_CASE )
lowerCamelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase__ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase__ = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
lowerCamelCase__ = tokenizer.vocab_size
lowerCamelCase__ = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) for x in range(end - 7 , __SCREAMING_SNAKE_CASE )]
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
lowerCamelCase__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
lowerCamelCase__ = tokenizer(__SCREAMING_SNAKE_CASE ).input_ids
self.assertEqual(
tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def _UpperCamelCase ( self : int ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_00_03 )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Any ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
lowerCamelCase__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
lowerCamelCase__ = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , __SCREAMING_SNAKE_CASE )
lowerCamelCase__ = 10
lowerCamelCase__ = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_00_04, 5_00_01] )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase__ = PLBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors='pt' )
lowerCamelCase__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __SCREAMING_SNAKE_CASE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors='pt' )
lowerCamelCase__ = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=10 , return_tensors='pt' )
lowerCamelCase__ = targets['input_ids']
lowerCamelCase__ = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
'input_ids': [[1_50, 2_42, 2, 5_00_03]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_00_01,
} , )
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
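# Minimal usage sketch (assumes network access to the Hugging Face Hub):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")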
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Any ):
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two trees: values <= value and values > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` is smaller than any in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert a value into the treap."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes holding `value` from the treap."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the treap's values in order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a sequence of "+value" / "-value" commands to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")
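# Example session: entering "+1 +3 +5 +17 +19 +2 +16 +4 +0" inserts those values
# one by one, and a later "-1" erases every node holding the value 1.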
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
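    # The tester deliberately uses a tiny configuration (2 layers, hidden size 32) so
    # the tests below run quickly on CPU; prepare_config_and_inputs also rewrites each
    # attention-mask row into a contiguous block of 1s followed by 0s.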
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass
    def test_training_gradient_checkpointing(self):
        pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 | 0 |
"""simple docstring"""
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
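# Migration sketch (illustrative): the replacement import the warning above points at; the
# checkpoint id below is only an example.
# from diffusers import StableDiffusionImg2ImgPipeline
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")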
| 659 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig ( PretrainedConfig ):
    model_type = 'openai-gpt'
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
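# Quick usage sketch (illustrative): `attribute_map` routes generic config names to the
# GPT-specific ones, e.g. `num_hidden_layers` resolves to `n_layer`:
# config = OpenAIGPTConfig(n_layer=6, n_head=8)
# config.num_hidden_layers  # -> 6, via attribute_map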
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
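# Usage sketch (illustrative; `from_pretrained` needs network access to fetch the vocab):
# tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
# tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
# -> [2, 0, 0, 0, 1, 1, 1]   # [CLS] gets its own segment id via `cls_token_type_id = 2`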
| 659 | 0 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_snake_case = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''Generates a tuple of dummy DataLoaders to test with.'''
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''Trains for `num_epochs` and returns the per-step random draws, used to compare RNG state.'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''Simple model: y = a * x + b.'''
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class _a ( unittest.TestCase ):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
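# How this file is exercised (sketch; file name illustrative): the unittest class runs under
# pytest as usual, while the @require_cuda test re-launches this same file under torchrun so the
# `__main__` block above runs once per process, e.g.:
#   torchrun --nproc_per_node=2 test_state_checkpointing.py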
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph):
    '''Returns True if the graph is bipartite (2-colorable), False otherwise.'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
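# Counter-example (illustrative): any odd cycle makes a graph non-bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False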
| 659 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    '''Maps an original MAE checkpoint key to its Hugging Face ViTMAE equivalent.'''
    if "cls_token" in name:
        name = name.replace('cls_token' , 'vit.embeddings.cls_token' )
    if "mask_token" in name:
        name = name.replace('mask_token' , 'decoder.mask_token' )
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
    if "blocks" in name:
        name = name.replace('blocks' , 'vit.encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "decoder_embed" in name:
        name = name.replace('decoder_embed' , 'decoder.decoder_embed' )
    if "decoder_norm" in name:
        name = name.replace('decoder_norm' , 'decoder.decoder_norm' )
    if "decoder_pred" in name:
        name = name.replace('decoder_pred' , 'decoder.decoder_pred' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight' , 'vit.layernorm.weight' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias' , 'vit.layernorm.bias' )
    return name
def convert_state_dict(orig_state_dict, config):
    '''Splits fused qkv weights and renames remaining keys for the HF ViTMAE layout.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''Downloads an original MAE checkpoint, converts it and verifies the logits before saving.'''
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='pt')
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
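# Example invocation (sketch; the script name below is illustrative):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_large.pth \
#       --pytorch_dump_folder_path ./vit-mae-large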
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)
    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    '''Open-addressing hash map with linear probing and automatic resizing.'''
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = ', '.join(f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
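# Minimal demo (illustrative): exercises insert, overwrite, delete and the resize paths.
if __name__ == "__main__":
    hm = HashMap()
    for i, key in enumerate("abcdefghij"):
        hm[key] = i  # enough items to trigger at least one _size_up
    hm["a"] = 42  # overwriting an existing key keeps the length unchanged
    del hm["b"]
    assert len(hm) == 9 and hm["a"] == 42 and "b" not in hm
    print(hm)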
| 659 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy')
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')
        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'/dit/{word}_512.npy')
            assert np.abs((expected_image - image).max()) < 1e-1
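# Quick-start sketch outside the test harness (illustrative; downloads weights, GPU recommended):
# pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
# class_ids = pipe.get_label_ids(['white shark'])
# image = pipe(class_ids, num_inference_steps=25).images[0]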
| 700 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''Naive recursion: counts ordered sequences drawn from `array` that sum to `target`.'''
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''Top-down recursion memoized in a dp array.'''
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''Bottom-up tabulation; O(n * target) time, O(target) space.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
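    # Cross-check (illustrative): all three implementations count the same ordered combinations.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
    )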
| 659 | 0 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    '''Moves or sets `tensor_name` on `module`, quantizing through bitsandbytes when needed.'''
    if "." in tensor_name:
        splits = tensor_name.split('.')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('cpu')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse(
                        '0.37.2')
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')
            else:
                new_value = torch.tensor(value, device='cpu')
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, 'SCB', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    '''Recursively swaps `nn.Linear`/`Conv1D` children for their bitsandbytes counterparts.'''
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    '''Public wrapper around `_replace_with_bnb_linear` that warns when nothing was converted.'''
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead', FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead', FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    '''Finds module names (e.g. tied lm heads) that should be kept in full precision.'''
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
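# Usage sketch (illustrative; needs a CUDA-enabled bitsandbytes install):
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
# qconfig = BitsAndBytesConfig(load_in_8bit=True)
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# model = replace_with_bnb_linear(model, quantization_config=qconfig)
# get_keys_to_not_convert(model)  # e.g. ['lm_head'] stays in full precision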
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_m2m_100 import (
        M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
        M2M100ForConditionalGeneration,
        M2M100Model,
        M2M100PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class _a ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        for type in ['absolute', 'relative_key', 'relative_key_query']:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 702 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    '''simple docstring'''
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''simple docstring'''
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    '''simple docstring'''
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''simple docstring'''
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
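
# Illustrative usage (made-up rows; first column minimized via weight 0, second
# maximized via weight 1): procentual_proximity([[20, 60], [10, 70], [30, 50]], [0, 1])
# appends combined scores of 1.0, 2.0 and 0.0 to the three rows in place.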
| 659 | 0 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
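
# Quick sanity check: with two 2-sided dice there is one way to total 2,
# two ways to total 3 and one way to total 4.
assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]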
if __name__ == "__main__":
print(f"""{solution() = }""")
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
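
# Illustrative usage: pollard_rho(8051) returns a nontrivial factor (83 or 97),
# while pollard_rho(13) is expected to return None within the default attempts.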
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    '''simple docstring'''
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
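
# Minimal sketch of a direct call (the model identifiers here are illustrative):
# consolidate(
#     'rag_sequence',
#     'facebook/bart-large',
#     'facebook/dpr-question_encoder-single-nq-base',
#     Path('./rag-consolidated'),
# )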
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
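
# Minimal sketch on a 2x2 obstacle-free grid (manhattan-distance heuristic):
# search([[0, 0], [0, 0]], [0, 0], [1, 1], 1, [[2, 1], [1, 0]])
# returns the path [[0, 0], [0, 1], [1, 1]] together with the action map.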
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 659 | 0 |
"""simple docstring"""
import math
def res(x: float, y: float) -> float:
    '''simple docstring'''
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')
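
# Worked example: res(10, 3) == 3 * log10(10) == 3.0, i.e. log10(10**3).
assert math.isclose(res(10, 3), 3.0)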
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    xa, ya = map(int, input(prompt).split(','))
    xb, yb = map(int, input(prompt).split(','))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print('Largest number is', xa, '^', ya)
    elif resb > resa:
        print('Largest number is', xb, '^', yb)
    else:
        print('Both are equal')
| 706 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
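
# Quick self-check: the even Fibonacci terms not exceeding 10 are 2 and 8.
assert solution(10) == 10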
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 707 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
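
# Minimal sketch of another run (hypothetical 4-node network; note the function
# mutates the capacity matrix in place):
# ford_fulkerson([[0, 3, 2, 0], [0, 0, 0, 2], [0, 0, 0, 3], [0, 0, 0, 0]], 0, 3)
# returns 4 (2 units through node 1 plus 2 units through node 2).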
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 659 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    '''simple docstring'''
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    '''simple docstring'''
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name

    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
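
# Note on the qkv split above: a fused projection of shape (3 * dim, dim) is sliced
# row-wise into equal blocks, e.g. with dim = 2 rows 0-1 form the query weight,
# rows 2-3 the key weight and rows 4-5 the value weight.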
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)

        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(f'microsoft/{model_name}')
        image_processor.push_to_hub(f'microsoft/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    'ids': outputs.input_ids[task],
                    'task_id': task,
                    'input_len': outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    '''simple docstring'''
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return ''.join(string_list[:-2])
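
# Worked example: remove_last_block("    return a + b\nclass Next:") splits on the
# EOF markers and drops the trailing block, returning "    return a + b".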
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    '''simple docstring'''
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    '''simple docstring'''
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['HF_ALLOW_CODE_EVAL'] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.'
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = f'check({human_eval["test"][task]["entry_point"]})'
            references.append('\n' + test_func + '\n' + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f'Results: {pass_at_k}')

        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
def perfect(number: int) -> bool:
    '''simple docstring'''
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
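
# Quick self-check: 28 is perfect (1 + 2 + 4 + 7 + 14 == 28); 27 is not
# (its proper divisors 1, 3 and 9 sum to 13).
assert perfect(28)
assert not perfect(27)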
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input('Enter number: ').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight')

        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
    im = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = 'relu'
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', check_hash=True)['model']

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('decoder') and "output_projection" not in key:
            state_dict['decoder.model.' + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='pt').pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = 'swinv2'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
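
# Illustrative check: with the defaults above, hidden_size is
# embed_dim * 2 ** (len(depths) - 1) == 96 * 2**3 == 768 after the final stage.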
| 659 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode('sequence builders')
        text_a = tokenizer.encode('multi-sequence build')

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
lowerCamelCase__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 712 |
"""simple docstring"""
def factorial( num: int )-> int:
    '''Return the factorial of ``num``.'''
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add( number: int )-> int:
    '''Return the sum of the decimal digits of ``number``.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10 # Removing the last_digit from the given number
    return sum_of_digits
def solution( num: int = 100 )-> int:
    '''Return the sum of the digits of ``num``! (Project Euler #20).'''
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
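    # Quick sanity checks (values worked out by hand, not from the original
    # source): 10! = 3628800, whose digits sum to 27, and the digit sum of
    # 100! is the well-known Project Euler #20 answer, 648.
    assert solution(10) == 27
    assert solution(100) == 648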
| 659 | 0 |
"""simple docstring"""
class SubArray :
    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
        self.array = SCREAMING_SNAKE_CASE__.split(',' )
    def solve_sub_array( self : List[Any] ):
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( _a: List[str] )-> Optional[MinHash]:
    '''Return a MinHash of the given tokens, or None if there are too few tokens.'''
    if len(_a ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(_a ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( _a: str )-> Set[str]:
    '''Tokenize a code string on every non-alphanumeric character.'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
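# Illustrative example (worked out by hand, not from the original source):
# get_tokens("def foo_bar(x):") == {"def", "foo_bar", "x"}, since NON_ALPHA
# splits on every character outside [A-Za-z_0-9].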
class DuplicationIndex :
    def __init__( self : List[Any] , *,
    duplication_jaccard_threshold : float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self : List[Any] , code_key : Tuple , min_hash : MinHash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self : str ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self : List[str] , filepath : Union[str, Any] ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , 'w' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( _a: Union[str, Any] )-> Optional[int]:
    '''Compute the MinHash of a dataset element keyed by (index, repo_name, path).'''
    index , data = _a
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( _a: Type[Dataset] )-> Tuple:
    '''Yield ((index, repo_name, path), MinHash) pairs for a dataset iterator, in parallel.'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( _a: Type[Dataset] , jaccard_threshold: float )-> Optional[int]:
    '''Build duplicate clusters for a dataset using MinHash LSH.'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( _a: str , _b: str )-> float:
    '''Compute the Jaccard similarity between the token sets of two code strings.'''
    tokensa = get_tokens(_a )
    tokensb = get_tokens(_b )
    return len(tokensa & tokensb ) / len(tokensa | tokensb )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster: Dict , jaccard_threshold: Union[str, Any] )-> List[str]:
    '''Reduce a duplicate cluster to its "extreme" elements, counting the copies of each.'''
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a['base_index']]['content']
        for element_b in extremes:
            code_b = _shared_dataset[element_b['base_index']]['content']
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def find_extremes( cluster_list: Any , dataset: Tuple , jaccard_threshold: Dict )-> Union[str, Any]:
    '''Run _find_cluster_extremes_shared over all clusters, sharing the dataset across workers.'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset: Type[Dataset] , jaccard_threshold: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
    '''Deduplicate a dataset; return the filtered dataset and the duplicate clusters.'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['base_index']]['copies']
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
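# Illustrative end-to-end usage (the dataset name is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("my-org/my-code-dataset", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)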
| 659 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type )-> Any:
    '''Set a single weight on the HF model, following a dotted attribute path.'''
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned )-> Optional[int]:
    '''Map every fairseq weight onto the HF SEW model, logging any that go unused.'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
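# Worked example of the mapping above (hand-traced, not from the original
# source): the fairseq weight "encoder.layers.3.self_attn.k_proj.weight"
# matches the MAPPING key "self_attn.k_proj", layer_index resolves to "3",
# and the value lands on "sew.encoder.layers.3.attention.k_proj.weight"
# (the "sew." prefix applies only to fine-tuned checkpoints).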
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm )-> Union[str, Any]:
    '''Load one convolutional feature-extractor weight, dispatching on layer and type id.'''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned )-> Optional[Any]:
    '''Build a SEWConfig from the fairseq model configuration.'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True )-> Tuple:
    '''Convert a fairseq SEW checkpoint into a HuggingFace model directory.'''
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
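# Example invocation (script and file names are hypothetical):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_small_100k.pt \
#       --pytorch_dump_folder_path ./sew-small-100k-hf \
#       --dict_path ./dict.ltr.txt --is_finetuned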
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results( test_results: Any )-> Optional[Any]:
    '''Parse a pytest stats line into (failed, success, time_spent).'''
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
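# Hand-traced example (not from the original source): for the stats line
# "2 failed, 10 passed in 0.23s", handle_test_results returns failed == 2,
# success == 10 and time_spent == "0.23s".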
def extract_first_line_failure( failures_short_lines: Optional[int] )-> Optional[int]:
    '''Map each failing doctest file to the first line of its error message.'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
    def __init__( self : Optional[Any] , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self : List[str] ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures( self : Union[str, Any] ):
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload( self : str ):
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
    def post( self : Optional[int] ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks( self : Optional[Any] , job_name: Dict , job_link: List[Any] , failures: Tuple , text: Tuple ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += F'*{key}*\n_{value}_\n\n'
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self : Optional[int] ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links( )-> Optional[Any]:
    '''Fetch {job name: job URL} for the current GitHub Actions run.'''
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact( _a: str )-> Dict:
    '''Read every file of a downloaded artifact directory into a dict keyed by file stem.'''
    _artifact = {}
    if os.path.exists(_a ):
        files = os.listdir(_a )
        for file in files:
            try:
                with open(os.path.join(_a , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(_a , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts( )-> Optional[int]:
    '''Discover artifact directories present in the working directory.'''
    class Artifact:
        def __init__( self : Tuple , name: str ):
            self.name = name
            self.paths = []
        def __str__( self : Dict ):
            return self.name
        def add_path( self : Union[str, Any] , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
    all_failures = extract_first_line_failure(artifact["failures_short"])
    for line in artifact["summary_short"].split("\n"):
        if re.search("FAILED", line):
            line = line.replace("FAILED ", "")
            line = line.split()[0].replace("\n", "")
            if "::" in line:
                file_path, test = line.split("::")
            else:
                file_path, test = line, line
            for file_regex in docs.keys():
                if fnmatch(file_path, file_regex):
                    category = docs[file_regex]
                    doc_test_results[category]["failed"].append(test)
                    failure = all_failures[test] if test in all_failures else "N/A"
                    doc_test_results[category]["failures"][test] = failure
                    break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 659 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seqaseq = False
def _UpperCamelCase ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
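        # With this toy BPE, " lower" tokenizes to ["\u0120low", "er"]: the
        # merges "\u0120 l" -> "\u0120l o" -> "\u0120lo w" build "\u0120low",
        # and "e r" builds "er" (hand-traced example, not from the original).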
    def get_tokenizer( self : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
    def get_rust_tokenizer( self : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
    def get_input_output_texts( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self : Any ):
        tokenizer = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _UpperCamelCase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "lower newer"
# Testing tokenization
lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids without special tokens
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids with special tokens
lowerCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing the unknown token
lowerCamelCase__ = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# Simple input
lowerCamelCase__ = "This is a simple input"
lowerCamelCase__ = ["This is a simple input 1", "This is a simple input 2"]
lowerCamelCase__ = ("This is a simple input", "This is a pair")
lowerCamelCase__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='max_length' , )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
lowerCamelCase__ = "This is a simple input"
lowerCamelCase__ = ["This is a simple input looooooooong", "This is a simple input"]
lowerCamelCase__ = ("This is a simple input", "This is a pair")
lowerCamelCase__ = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCamelCase__ = tokenizer.pad_token_id
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=30 , return_tensors='np' )
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
lowerCamelCase__ = tokenizer(*SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=60 , return_tensors='np' )
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = "$$$"
lowerCamelCase__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "This is a simple input"
lowerCamelCase__ = ["This is a simple input 1", "This is a simple input 2"]
lowerCamelCase__ = tokenizer.bos_token_id
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _UpperCamelCase ( self : Optional[int] ):
pass
def _UpperCamelCase ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowerCamelCase__ = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase__ = "Encode this."
lowerCamelCase__ = "This one too please."
lowerCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = encoded_sequence_dict["input_ids"]
lowerCamelCase__ = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE__ )
]
lowerCamelCase__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_tokenizers
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowerCamelCase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "A photo of a cat"
lowerCamelCase__ = tokenizer.encode(
SCREAMING_SNAKE_CASE__ , )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('test_opt' )
lowerCamelCase__ = AutoTokenizer.from_pretrained('./test_opt' )
lowerCamelCase__ = tokenizer.encode(
SCREAMING_SNAKE_CASE__ , )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "A photo of a cat"
lowerCamelCase__ = tokenizer.encode(
SCREAMING_SNAKE_CASE__ , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "bos"
lowerCamelCase__ = tokenizer.get_vocab()["bos"]
lowerCamelCase__ = "A photo of a cat"
lowerCamelCase__ = tokenizer.encode(
SCREAMING_SNAKE_CASE__ , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('./tok' )
lowerCamelCase__ = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
lowerCamelCase__ = tokenizer.encode(
SCREAMING_SNAKE_CASE__ , )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImgaImgPipeline( DiffusionPipeline ):
    def __init__( self : Tuple , prior: PriorTransformer , image_encoder: CLIPVisionModel , image_processor: CLIPImageProcessor , scheduler: HeunDiscreteScheduler , renderer: ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self : str , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self : List[Any] , gpu_id: int=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
    def _execution_device( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self : Optional[int] , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(_snake_case )
    def __call__( self : List[Any] , image: Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred , _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
print()
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
from collections.abc import Callable
class Heap :
    def __init__( self : Dict , key: Callable | None = None ):
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent( self : Optional[int] , i: int ):
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self : Optional[int] , i: int ):
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self : List[str] , i: int ):
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self : Union[str, Any] , i: int , j: int ):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self : List[Any] , i: int , j: int ):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self : Optional[Any] , i: int ):
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self : Tuple , index: int ):
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index , parent = parent, self._parent(parent )
    def _heapify_down( self : Optional[Any] , index: int ):
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self : List[str] , item: int , item_value: int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self : str , item: int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self : int , item: int , item_value: int ):
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self : Optional[Any] ):
        return self.arr[0] if self.size else None
    def extract_top( self : List[Any] ):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
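# Minimal usage sketch (the class name `Heap` is this edit's choice for the
# originally unnamed class; the values are illustrative):
#
#   heap = Heap(key=lambda x: x)   # max-heap on the raw value
#   heap.insert_item("a", 3)
#   heap.insert_item("b", 7)
#   heap.update_item("a", 10)
#   assert heap.extract_top() == ["a", 10]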
def snake_case ( )-> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self : int , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ):
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "eng_Latn" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "fra_Latn" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
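# Usage sketch (illustrative only; the checkpoint name and the printed ids are
# assumptions, not part of this module). In the default, non-legacy mode the
# tokenizer prefixes the source language code and appends </s>, per
# set_src_lang_special_tokens above.
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tokenizer("Hello world", return_tensors="pt")
    print(batch["input_ids"])  # [[<eng_Latn id>, ..., </s> id]]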
| 659 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool( PipelineTool ):
    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ['text']
    outputs = ['text']
    def encode( self , text ):
        return self.pre_processor(text , return_tensors='pt' , truncation=True )
    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
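# Usage sketch (illustrative, not part of the original module): assumes the default
# checkpoint above can be downloaded, and that the PipelineTool base class provides
# __call__, which chains encode -> forward -> decode.
if __name__ == "__main__":
    tool = TextSummarizationTool()
    print(tool("Anna: Lunch at noon? Ben: Sure, see you at the cafe then."))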
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , bos_token_id=0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ):
        pass
    def test_training_gradient_checkpointing( self ):
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base( self ):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 659 | 0 |
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available()-> bool:
    '''
    Detect whether SageMaker model parallelism was configured for this job.
    '''
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
    mp_parameters: str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
    def __post_init__( self ):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' , FutureWarning , )
@cached_property
    def _setup_devices( self ) -> "torch.device":
        logger.info('PyTorch: setting up devices' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
        if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
    def world_size( self ):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device( self ):
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation( self ):
        return False
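# Illustrative check (not part of the original file): per the guard defined above,
# model parallelism is reported as available only when SM_HP_MP_PARAMETERS carries a
# "partitions" field, the framework params enable MPI, and `smdistributed` imports:
#     os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#     os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#     is_sagemaker_model_parallel_available()  # True only if smdistributed is installed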
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
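# Migration sketch (illustrative; the checkpoint name below is an assumption):
#     from diffusers import StableDiffusionImg2ImgPipeline
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")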
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def check_polygon( nums: list[float] )-> bool:
    '''
    Takes a list of side lengths and determines whether a two-dimensional
    polygon with those side lengths can exist (the longest side must be
    strictly shorter than the sum of the remaining sides).
    '''
    if len(nums ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in nums ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
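# Illustrative checks: a 3-4-5 triangle satisfies the polygon inequality,
# while side lengths (1, 1, 3) do not.
#     check_polygon([3, 4, 5])   # True
#     check_polygon([1, 1, 3])   # False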
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
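# Usage sketch (illustrative; assumes network access to the checkpoint): sequence
# pairs receive token_type_ids that begin with the dedicated cls_token_type_id (2)
# defined on the class above.
if __name__ == "__main__":
    tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    enc = tok("Hello world", "How are you?")
    print(enc["token_type_ids"])  # starts with [2, 0, ...]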
| 659 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n                This includes models such as gpt2, causal variations of bert,\n                causal versions of t5, and more (the full list can be found\n                in the AutoModelForCausalLM documentation here:\n                https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='pt' , return_attention_mask=True , ).to(device )
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph: dict[int, list[int]] )-> bool:
    '''
    Determine whether the given adjacency-list graph is bipartite
    using a depth-first 2-coloring.
    '''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs( v: int , c: int )-> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
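# A contrasting check (added for illustration): an odd cycle such as a triangle
# cannot be two-colored, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False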
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    key: KEY
    val: VAL
class _DeletedItem( _Item ):
    def __init__( self ):
        super().__init__(None , None )
    def __bool__( self ):
        return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key: KEY ):
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind: int ):
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind: int , key: KEY , val: VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size: int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key: KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key: KEY , val: VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key: KEY , val: VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key: KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key: KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ):
        return self._len
    def __iter__( self ):
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ):
        val_string = ' ,'.join(
            F'{item.key}: {item.val}' for item in self._buckets if item )
        return F'HashMap({val_string})'
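# Quick demonstration (illustrative) of the open-addressing map above:
if __name__ == "__main__":
    hm = HashMap()
    hm["key_a"] = 1
    hm["key_b"] = 2
    print(len(hm), hm["key_a"])  # 2 1
    del hm["key_a"]
    print("key_a" in hm)  # False -- the bucket now holds the deleted sentinel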
| 659 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
a_ : Optional[int] = MODEL_FOR_MASKED_LM_MAPPING
a_ : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
lowerCamelCase__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 3_80_15, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 2_55_06, 'token_str': ' accuser'},
] , )
lowerCamelCase__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 3_80_15,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 2_55_06,
'token_str': ' accuser',
},
] , )
lowerCamelCase__ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
lowerCamelCase__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 3_56_76, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
lowerCamelCase__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 3_56_76,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
lowerCamelCase__ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 29_41, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_36_06, 'token_str': ' Clara'},
] , )
lowerCamelCase__ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
lowerCamelCase__ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_torch
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(UpperCAmelCase__ )
@slow
@require_tf
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(UpperCAmelCase__ )
    def run_large_test( self , unmasker ):
lowerCamelCase__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 6_10, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 15_73, 'token_str': ' Chris'},
] , )
lowerCamelCase__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 22_01,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 1_27_90,
'token_str': ' Lyon',
},
] , )
lowerCamelCase__ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
lowerCamelCase__ = None
lowerCamelCase__ = None
self.run_pipeline_test(UpperCAmelCase__ , [] )
@require_tf
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
lowerCamelCase__ = None
lowerCamelCase__ = None
self.run_pipeline_test(UpperCAmelCase__ , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            F'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
lowerCamelCase__ = fill_masker.tokenizer
lowerCamelCase__ = fill_masker.model
lowerCamelCase__ = fill_masker(
F'This is a {tokenizer.mask_token}' , )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
lowerCamelCase__ = fill_masker([F'This is a {tokenizer.mask_token}'] )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
lowerCamelCase__ = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
UpperCAmelCase__ , [
[
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
],
[
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
],
] , )
with self.assertRaises(UpperCAmelCase__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(UpperCAmelCase__ ):
fill_masker('This is' )
self.run_test_top_k(UpperCAmelCase__ , UpperCAmelCase__ )
self.run_test_targets(UpperCAmelCase__ , UpperCAmelCase__ )
self.run_test_top_k_targets(UpperCAmelCase__ , UpperCAmelCase__ )
self.fill_mask_with_duplicate_targets_and_top_k(UpperCAmelCase__ , UpperCAmelCase__ )
self.fill_mask_with_multiple_masks(UpperCAmelCase__ , UpperCAmelCase__ )
    def run_test_targets( self , model , tokenizer ):
lowerCamelCase__ = tokenizer.get_vocab()
lowerCamelCase__ = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , targets=UpperCAmelCase__ )
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
lowerCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , UpperCAmelCase__ )
lowerCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(UpperCAmelCase__ ) )
# Call argument
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
lowerCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , UpperCAmelCase__ )
lowerCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(UpperCAmelCase__ ) )
# Score equivalence
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ )
lowerCamelCase__ = [top_mask['''token_str'''] for top_mask in outputs]
lowerCamelCase__ = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCAmelCase__ ) == set(UpperCAmelCase__ ):
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=UpperCAmelCase__ )
lowerCamelCase__ = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
# Raises with invalid
with self.assertRaises(UpperCAmelCase__ ):
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(UpperCAmelCase__ ):
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[''] )
with self.assertRaises(UpperCAmelCase__ ):
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , targets='' )
    def run_test_top_k( self , model , tokenizer ):
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , top_k=2 )
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
] , )
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
    def run_test_top_k_targets( self , model , tokenizer ):
lowerCamelCase__ = tokenizer.get_vocab()
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
# top_k=2, ntargets=3
lowerCamelCase__ = sorted(vocab.keys() )[:3]
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 , targets=UpperCAmelCase__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        lowerCamelCase__ = [el['''token_str'''] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCAmelCase__ ).issubset(UpperCAmelCase__ ):
lowerCamelCase__ = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=3 , targets=UpperCAmelCase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(UpperCAmelCase__ ) , nested_simplify(UpperCAmelCase__ ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowerCamelCase__ = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCamelCase__ = sorted(vocab.keys() )[:3]
lowerCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCamelCase__ = fill_masker(F'My name is {tokenizer.mask_token}' , targets=UpperCAmelCase__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(UpperCAmelCase__ ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
lowerCamelCase__ = FillMaskPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
lowerCamelCase__ = fill_masker(
F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
UpperCAmelCase__ , [
[
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
],
[
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
],
[
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
{'sequence': ANY(UpperCAmelCase__ ), 'score': ANY(UpperCAmelCase__ ), 'token': ANY(UpperCAmelCase__ ), 'token_str': ANY(UpperCAmelCase__ )},
],
] , )
| 700 |
"""simple docstring"""
def combination_sum_iv( n: int , array: list[int] , target: int )-> int:
    '''
    Count the ordered ways to reach `target` as a sum of elements of `array`
    (elements may be reused), via naive recursion.
    '''
    def count_of_possible_combinations( target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n: int , array: list[int] , target: int )-> int:
    '''
    Same count, memoised over a dp_array to avoid recomputation.
    '''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int )-> int:
    '''
    Iterative bottom-up variant of the same count.
    '''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
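    # The DP variants below should agree with the naive recursion (added for
    # illustration): for array=[1, 2, 5] and target=5 there are 9 ordered sums.
    print(combination_sum_iv_dp_array(n, array, target))
    print(combination_sum_iv_bottom_up(n, array, target))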
| 659 | 0 |
import string
import numpy
def greatest_common_divisor( a: int , b: int )-> int:
    '''
    Euclid's algorithm; used below to validate the key determinant.
    '''
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher :
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key: numpy.ndarray ):
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter: str )-> int:
        return self.key_string.index(letter )
    def replace_digits( self , num: int )-> str:
        return self.key_string[round(num )]
    def check_determinant( self )-> None:
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                F'determinant modular {req_l} of encryption key({det}) '
                F'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = [char for char in text.upper() if char in self.key_string]
lowerCamelCase__ = chars[-1]
while len(_lowerCamelCase ) % self.break_key != 0:
chars.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.process_text(text.upper() )
lowerCamelCase__ = ''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
lowerCamelCase__ = text[i : i + self.break_key]
lowerCamelCase__ = [self.replace_letters(_lowerCamelCase ) for char in batch]
lowerCamelCase__ = numpy.array([vec] ).T
lowerCamelCase__ = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
0
]
lowerCamelCase__ = ''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase__ = det % len(self.key_string )
lowerCamelCase__ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCamelCase__ = i
break
lowerCamelCase__ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_lowerCamelCase ) )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.make_decrypt_key()
lowerCamelCase__ = self.process_text(text.upper() )
lowerCamelCase__ = ''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
lowerCamelCase__ = text[i : i + self.break_key]
lowerCamelCase__ = [self.replace_letters(_lowerCamelCase ) for char in batch]
lowerCamelCase__ = numpy.array([vec] ).T
lowerCamelCase__ = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
lowerCamelCase__ = ''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = int(input('Enter the order of the encryption key: ' ) )
lowerCamelCase__ = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(_lowerCAmelCase ):
lowerCamelCase__ = [int(_lowerCAmelCase ) for x in input().split()]
hill_matrix.append(_lowerCAmelCase )
lowerCamelCase__ = HillCipher(numpy.array(_lowerCAmelCase ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
lowerCamelCase__ = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
lowerCamelCase__ = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(_lowerCAmelCase ) )
elif option == "2":
lowerCamelCase__ = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
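# A minimal non-interactive sketch of the round trip (the 2x2 key is the classic
# example from TheAlgorithms' doctests; the exact ciphertext is not re-verified
# here, so treat the call as illustrative):
# hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
# hc.decrypt(hc.encrypt('HELLO')) == hc.process_text('HELLO')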
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    '''Heun's method: an explicit Euler predictor step refined by a trapezoidal corrector.'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
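# A minimal usage sketch (example values assumed, not part of the original file):
# for y' = y with y(0) = 1, euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]
# should land close to e ~ 2.71828, since Heun's method is second-order accurate.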
| 702 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    '''Transpose the rows of source_data into per-attribute columns.'''
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''Normalise every column to [0, 1]; weight 0 rewards small values, weight 1 large ones.'''
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'Invalid weight of {weight:f} provided'
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    '''Sum the per-attribute scores of each row into one final score.'''
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    '''Score every row of source_data and append the combined score to the row in place.'''
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
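# A minimal sketch of the intended call (example data assumed):
# procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
# appends one combined score to every row; columns weighted 0 favour small
# values and columns weighted 1 favour large ones.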
| 659 | 0 |
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    '''Minimal path sum moving right, up or down across the matrix (Project Euler 82).'''
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
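# The three inner loops above perform one left-to-right sweep per column: seed
# column j with "enter from the left", then relax moving downwards and then
# upwards, which is the standard O(rows * cols) dynamic programme for this task.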
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    '''Return a nontrivial factor of ``num`` using Pollard's rho, or None if every attempt fails.'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
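# A small worked sketch (example values assumed): pollard_rho(8051) should
# return one of the nontrivial factors 83 or 97, since 8051 = 83 * 97, while a
# prime input normally exhausts the attempts and yields None.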
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class _a ( snake_case__ ):
a_ : Optional[int] = 'bridgetower_vision_model'
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_88 , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Dict=1e-05 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=False , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
super().__init__(**lowercase_ )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = initializer_factor
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = stop_gradient
lowerCamelCase__ = share_layernorm
lowerCamelCase__ = remove_last_layer
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class _a ( snake_case__ ):
a_ : Tuple = 'bridgetower_text_model'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=5_02_65 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_14 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Dict=1e-05 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]="absolute" , SCREAMING_SNAKE_CASE__ : List[Any]=True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
super().__init__(**lowercase_ )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = initializer_factor
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict, **kwargs)
class _a ( snake_case__ ):
a_ : int = 'bridgetower'
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Any=1e-05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]="add" , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=6 , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : str=None , **SCREAMING_SNAKE_CASE__ : Any , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase__ = kwargs.pop('text_config_dict' , lowercase_ )
lowerCamelCase__ = kwargs.pop('vision_config_dict' , lowercase_ )
super().__init__(**lowercase_ )
lowerCamelCase__ = share_cross_modal_transformer_layers
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_size
lowerCamelCase__ = initializer_factor
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = share_link_tower_layers
lowerCamelCase__ = link_tower_type
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = tie_word_embeddings
lowerCamelCase__ = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase__ = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
lowerCamelCase__ = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
lowerCamelCase__ = BridgeTowerTextConfig(**lowercase_ )
lowerCamelCase__ = BridgeTowerVisionConfig(**lowercase_ )
@classmethod
def _UpperCamelCase ( cls : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    feat_extract_tester = None
    feature_extraction_class = None
@property
def _UpperCamelCase ( self : Optional[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , 'feature_size' ) )
self.assertTrue(hasattr(A_ , 'sampling_rate' ) )
self.assertTrue(hasattr(A_ , 'padding_value' ) )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCamelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = self.feat_extract_tester.seq_length_diff
lowerCamelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowerCamelCase__ = self.feat_extract_tester.min_seq_length
lowerCamelCase__ = self.feat_extract_tester.batch_size
lowerCamelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase__ = feat_extract.pad(A_ , padding=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' )[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ = feat_extract.pad(A_ , pad_to_multiple_of=10 )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , pad_to_multiple_of=10 )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='np' , )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
lowerCamelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCamelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=A_ , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='np' , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCamelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ = 12
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
lowerCamelCase__ = input_a[input_name]
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
lowerCamelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def _UpperCamelCase ( self : int ):
self._check_padding(numpify=A_ )
def _UpperCamelCase ( self : Optional[int] ):
self._check_padding(numpify=A_ )
def _UpperCamelCase ( self : List[Any] ):
self._check_truncation(numpify=A_ )
def _UpperCamelCase ( self : int ):
self._check_truncation(numpify=A_ )
@require_torch
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.feat_extract_dict
lowerCamelCase__ = True
lowerCamelCase__ = self.feature_extraction_class(**A_ )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = [len(A_ ) for x in speech_inputs]
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.feat_extract_dict
lowerCamelCase__ = True
lowerCamelCase__ = self.feature_extraction_class(**A_ )
lowerCamelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ = [len(A_ ) for x in speech_inputs]
lowerCamelCase__ = feat_extract.model_input_names[0]
lowerCamelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ = min(A_ )
lowerCamelCase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 705 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]],) -> tuple[list[list[int]], list[list[int]]]:
    '''A*-style grid search; returns the recovered path and the per-cell action map.'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
_snake_case = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_snake_case = [0, 0]
# all coordinates are given in format [y,x]
_snake_case = [len(grid) - 1, len(grid[0]) - 1]
_snake_case = 1
# the cost map which pushes the path closer to the goal
_snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_snake_case = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_snake_case = 99
_snake_case , _snake_case = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
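# The heuristic built above is a Manhattan-distance map with a heavy extra
# penalty on obstacle cells, so the expansion behaves like A* on a 4-connected
# grid; the printed action map records, per cell, the DIRECTIONS index that
# reached it, which is what the backtracking loop uses to recover the path.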
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    '''Return the indices of the two entries of sorted `nums` that sum to `target`.'''
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
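# Note the precondition: moving the two indices inward is only sound because
# `nums` is sorted ascending; on the input [2, 7, 11, 15] with target 9 the
# call above returns [0, 1].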
| 706 |
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    '''Sum the even-valued Fibonacci terms that do not exceed `n` (Project Euler 2).'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
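# Sanity sketch: the even Fibonacci terms below four million are 2, 8, 34, ...,
# 3524578, and they sum to 4613732, so that is what solution() should print
# for the default limit.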
| 659 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
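# debug_launcher runs the target function through torch.multiprocessing on CPU
# workers, so the two CPU tests above exercise the metric script without any
# GPU, while the multi-GPU test shells out to torchrun against the same file.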
| 707 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    '''Breadth-first search for an augmenting path from s to t; fills `parent` along the way.'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    '''Ford-Fulkerson with BFS path selection (Edmonds-Karp): augment until no path remains.'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
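# For the capacity matrix above (the classic CLRS example network) the program
# should print 23, the maximum flow from vertex 0 to vertex 5.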
| 659 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class _a ( A_ ):
a_ : Optional[int] = '''blip_2_vision_model'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : str=14_08 , SCREAMING_SNAKE_CASE__ : str=61_44 , SCREAMING_SNAKE_CASE__ : int=39 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=2_24 , SCREAMING_SNAKE_CASE__ : Dict=14 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=0.0_00_01 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : int=1e-10 , SCREAMING_SNAKE_CASE__ : List[str]=True , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = patch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = hidden_act
lowerCamelCase__ = qkv_bias
@classmethod
def _UpperCamelCase ( cls : int , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
lowerCamelCase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _a ( A_ ):
a_ : Any = '''blip_2_qformer'''
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Dict=3_05_22 , SCREAMING_SNAKE_CASE__ : List[str]=7_68 , SCREAMING_SNAKE_CASE__ : Optional[int]=12 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : int="absolute" , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : str=14_08 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = cross_attention_frequency
lowerCamelCase__ = encoder_hidden_size
@classmethod
def _UpperCamelCase ( cls : int , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[str] ):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
lowerCamelCase__ = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _a ( A_ ):
a_ : int = '''blip-2'''
a_ : Optional[int] = True
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : List[str]=32 , **SCREAMING_SNAKE_CASE__ : int ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
if vision_config is None:
lowerCamelCase__ = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
lowerCamelCase__ = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
lowerCamelCase__ = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
lowerCamelCase__ = BlipaVisionConfig(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
lowerCamelCase__ = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.text_config.tie_word_embeddings
lowerCamelCase__ = self.text_config.is_encoder_decoder
lowerCamelCase__ = num_query_tokens
lowerCamelCase__ = self.vision_config.hidden_size
lowerCamelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase__ = 1.0
lowerCamelCase__ = 0.02
@classmethod
def _UpperCamelCase ( cls : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.qformer_config.to_dict()
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that fires once every sequence in the batch hits an EOF string."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    '''Remove the last block of generated code that contains an EOF string.'''
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
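# Because the regex uses a capturing group, re.split keeps the EOF delimiters in
# the result list, so dropping the last two items removes the final stop token
# together with whatever the model generated after it.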
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    '''Generate multiple completions for each task, gathering results across processes.'''
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    '''Run HumanEval generation and evaluation end to end.'''
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = F'check({human_eval["test"][task]["entry_point"]})'
            references.append('\n' + test_func + '\n' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(F'Results: {pass_at_k}')
        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    '''Parse raw DPR training data into an evaluation set and a gold data file.'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',)
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',)
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',)
    args = parser.parse_args()
    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
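# Each line written to the evaluation set holds one question, and the matching
# line of the gold file holds the tab-separated titles of its positive
# contexts -- the layout consumed by the accompanying RAG evaluation script.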
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
a_ : List[Any] = (DEISMultistepScheduler,)
a_ : str = (('''num_inference_steps''', 25),)
def _UpperCamelCase ( self : Any , **SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**UpperCamelCase__ )
return config
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple=0 , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = dict(self.forward_default_kwargs )
lowerCamelCase__ = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ = self.get_scheduler_config(**UpperCamelCase__ )
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
lowerCamelCase__ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ , lowerCamelCase__ = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
lowerCamelCase__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self : Union[str, Any] ):
pass
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : int=0 , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = dict(self.forward_default_kwargs )
lowerCamelCase__ = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
lowerCamelCase__ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
lowerCamelCase__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[Any] ):
if scheduler is None:
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(**UpperCamelCase__ )
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(**UpperCamelCase__ )
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ = 10
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = dict(self.forward_default_kwargs )
lowerCamelCase__ = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , 'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , 'set_timesteps' ):
lowerCamelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
lowerCamelCase__ = scheduler.timesteps[5]
lowerCamelCase__ = scheduler.timesteps[6]
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCamelCase__ = self.full_loop(scheduler=UpperCamelCase__ )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
lowerCamelCase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = DEISMultistepScheduler.from_config(scheduler.config )
lowerCamelCase__ = self.full_loop(scheduler=UpperCamelCase__ )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def _UpperCamelCase ( self : int ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def _UpperCamelCase ( self : str ):
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , algorithm_type='deis' , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def _UpperCamelCase ( self : Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def _UpperCamelCase ( self : Dict ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , algorithm_type=UpperCamelCase__ , )
lowerCamelCase__ = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , algorithm_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def _UpperCamelCase ( self : Dict ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _UpperCamelCase ( self : Tuple ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.full_loop()
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.0_91 ) < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
lowerCamelCase__ = scheduler_class(**UpperCamelCase__ )
lowerCamelCase__ = 10
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ = model(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
        assert sample.dtype == torch.float16
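

# Illustrative side note (not part of the test class above): these multistep
# schedulers share a config schema, so one can be built from another's config --
# the same round-trip the from_config test above exercises when swapping samplers.
if __name__ == "__main__":
    deis = DEISMultistepScheduler(num_train_timesteps=1000, beta_schedule="linear")
    unipc = UniPCMultistepScheduler.from_config(deis.config)  # swap sampler, keep schedule
    print(type(unipc).__name__)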
| 711 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class _a ( PretrainedConfig ):
a_ : Union[str, Any] = 'swinv2'
a_ : Optional[int] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : str=96 , SCREAMING_SNAKE_CASE__ : Dict=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : int=32 , **SCREAMING_SNAKE_CASE__ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embed_dim
lowerCamelCase__ = depths
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = num_heads
lowerCamelCase__ = window_size
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = hidden_act
lowerCamelCase__ = use_absolute_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) )
lowerCamelCase__ = (0, 0, 0, 0)
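

# Quick worked example (illustrative, not part of the class above): with the
# defaults embed_dim=96 and depths=[2, 2, 6, 2], the derived channel dimension
# after the last stage is embed_dim * 2 ** (num_layers - 1) = 96 * 2 ** 3 = 768.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768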
| 659 | 0 |
"""simple docstring"""
import torch
from torch import nn
class _a ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : int=False ):
super().__init__()
lowerCamelCase__ = n_token
lowerCamelCase__ = d_embed
lowerCamelCase__ = d_proj
lowerCamelCase__ = cutoffs + [n_token]
lowerCamelCase__ = [0] + self.cutoffs
lowerCamelCase__ = div_val
lowerCamelCase__ = self.cutoffs[0]
lowerCamelCase__ = len(self.cutoffs ) - 1
lowerCamelCase__ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCamelCase__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCamelCase__ = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCamelCase__ = nn.ModuleList()
lowerCamelCase__ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
lowerCamelCase__ = keep_order
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
if proj is None:
lowerCamelCase__ = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCamelCase__ = nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
lowerCamelCase__ = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowerCamelCase__ = hidden[..., :-1, :].contiguous()
lowerCamelCase__ = labels[..., 1:].contiguous()
lowerCamelCase__ = hidden.view(-1 , hidden.size(-1 ) )
lowerCamelCase__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCamelCase__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCamelCase__ = labels != -1_00
lowerCamelCase__ = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
lowerCamelCase__ = (
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCamelCase__ = nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
lowerCamelCase__ , lowerCamelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = self.out_layers[0].weight[l_idx:r_idx]
lowerCamelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCamelCase__ = self.out_layers[i].weight
lowerCamelCase__ = self.out_layers[i].bias
if i == 0:
lowerCamelCase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCamelCase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = weights[0], biases[0], self.out_projs[0]
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase__ = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
lowerCamelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCamelCase__ = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
lowerCamelCase__ = 0
lowerCamelCase__ = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
lowerCamelCase__ , lowerCamelCase__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCamelCase__ = (labels >= l_idx) & (labels < r_idx)
lowerCamelCase__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCamelCase__ = labels.index_select(0 , lowerCAmelCase_ ) - l_idx
lowerCamelCase__ = head_logprob.index_select(0 , lowerCAmelCase_ )
lowerCamelCase__ = hidden.index_select(0 , lowerCAmelCase_ )
else:
lowerCamelCase__ = hidden
if i == 0:
if labels is not None:
lowerCamelCase__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCamelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = weights[i], biases[i], self.out_projs[i]
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase__ = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
lowerCamelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCamelCase__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCamelCase__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCamelCase__ = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
if self.n_clusters == 0:
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
lowerCamelCase__ , lowerCamelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = self.out_layers[0].weight[l_idx:r_idx]
lowerCamelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCamelCase__ = self.out_layers[i].weight
lowerCamelCase__ = self.out_layers[i].bias
if i == 0:
lowerCamelCase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCamelCase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = weights[0], biases[0], self.out_projs[0]
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCamelCase__ = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
lowerCamelCase__ = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
lowerCamelCase__ , lowerCamelCase__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCamelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = weights[i], biases[i], self.out_projs[i]
lowerCamelCase__ = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCamelCase__ = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
lowerCamelCase__ = head_logprob[:, -i] + tail_logprob_i
lowerCamelCase__ = logprob_i
return out
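

# The class above is a projected adaptive softmax in the spirit of torch's built-in
# nn.AdaptiveLogSoftmaxWithLoss; a minimal runnable sketch of the same idea with the
# stock module (all sizes below are illustrative, not taken from this file):
if __name__ == "__main__":
    adaptive = nn.AdaptiveLogSoftmaxWithLoss(
        in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=2.0
    )
    hidden = torch.randn(128, 64)             # flattened (batch * seq, d_proj)
    labels = torch.randint(0, 1000, (128,))   # target token ids
    result = adaptive(hidden, labels)
    print(result.loss)                        # mean negative log-likelihood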
| 712 |
"""simple docstring"""
def factorial ( num: int )-> int:
    '''Compute num! iteratively.'''
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def split_and_add ( number: int )-> int:
    '''Return the sum of the decimal digits of number.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution ( num: int = 100 )-> int:
    '''Return the digit sum of num! (Project Euler problem 20).'''
    fact = factorial(num )
    result = split_and_add(fact )
    return result
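

# Worked example (sanity check): 10! = 3628800, whose digit sum is
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; for the default num = 100 the digit sum
# of 100! is 648 (the answer to Project Euler problem 20).
assert solution(10 ) == 27
assert solution(100 ) == 648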
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 659 | 0 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 713 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def snake_case ( _a: List[str] )-> Optional[MinHash]:
'''simple docstring'''
if len(_a ) < MIN_NUM_TOKENS:
return None
lowerCamelCase__ = MinHash(num_perm=_a )
for token in set(_a ):
min_hash.update(token.encode() )
return min_hash
def snake_case ( _a: str )-> Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_a ) if len(t.strip() ) > 0}
class _a :
    def __init__( self : List[Any] , *, SCREAMING_SNAKE_CASE__ : float = 0.85 , ):
lowerCamelCase__ = duplication_jaccard_threshold
lowerCamelCase__ = NUM_PERM
lowerCamelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : MinHash ):
lowerCamelCase__ = self._index.query(SCREAMING_SNAKE_CASE__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(SCREAMING_SNAKE_CASE__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase__ = [base] + list(SCREAMING_SNAKE_CASE__ )
# reformat the cluster to be a list of dict
lowerCamelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(SCREAMING_SNAKE_CASE__ )
return duplicate_clusters
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.get_duplicate_clusters()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case ( _a: Union[str, Any] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = element
lowerCamelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case ( _a: Type[Dataset] )-> Tuple:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_a , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case ( _a: Type[Dataset] , _a: float )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = DuplicationIndex(duplication_jaccard_threshold=_a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_a ) ) , max_queue_size=100 ) ):
di.add(_a , _a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case ( _a: str , _a: str )-> float:
'''simple docstring'''
lowerCamelCase__ = get_tokens(_a )
lowerCamelCase__ = get_tokens(_a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def snake_case ( _a: Dict , _a: Union[str, Any] )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for elementa in cluster:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
lowerCamelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(_a , _a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase__ = 1
extremes.append(_a )
return extremes
def snake_case ( _a: Any , _a: Tuple , _a: Dict )-> Union[str, Any]:
'''simple docstring'''
global _shared_dataset
lowerCamelCase__ = dataset
lowerCamelCase__ = []
lowerCamelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=_a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_a , _a , ) , total=len(_a ) , ):
extremes_list.append(_a )
return extremes_list
def snake_case ( _a: Type[Dataset] , _a: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
lowerCamelCase__ = make_duplicate_clusters(_a , _a )
lowerCamelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase__ = {}
lowerCamelCase__ = find_extremes(_a , _a , _a )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase__ = element
lowerCamelCase__ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase__ = dataset.filter(lambda _a , _a : idx not in remove_indices , with_indices=_a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
lowerCamelCase__ = extreme_dict[element['base_index']]['copies']
print(F'Original dataset size: {len(_a )}' )
print(F'Number of duplicate clusters: {len(_a )}' )
print(F'Files in duplicate cluster: {len(_a )}' )
print(F'Unique files in duplicate cluster: {len(_a )}' )
print(F'Filtered dataset size: {len(_a )}' )
return ds_filter, duplicate_clusters
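

# Stand-alone illustration of the MinHash similarity the deduplication pipeline
# above relies on. It uses datasketch directly rather than the helpers above, and
# the two code snippets are made-up examples:
if __name__ == "__main__":
    def _demo_min_hash(text):
        min_hash = MinHash(num_perm=256)
        for token in {t for t in re.split(r"[^A-Za-z_0-9]", text) if t.strip()}:
            min_hash.update(token.encode())
        return min_hash

    hash_a = _demo_min_hash("def add(x, y): return x + y")
    hash_b = _demo_min_hash("def add(a, b): return a + b")
    print(hash_a.jaccard(hash_b))  # MinHash estimate of token-set Jaccard similarity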
| 659 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
a_ : List[str] = (CMStochasticIterativeScheduler,)
a_ : Tuple = 10
def _UpperCamelCase ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = {
'num_train_timesteps': 2_01,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
config.update(**_A )
return config
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = 10
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = self.scheduler_classes[0](**_A )
scheduler.set_timesteps(_A )
lowerCamelCase__ = scheduler.timesteps[0]
lowerCamelCase__ = scheduler.timesteps[1]
lowerCamelCase__ = self.dummy_sample
lowerCamelCase__ = 0.1 * sample
lowerCamelCase__ = scheduler.step(_A , _A , _A ).prev_sample
lowerCamelCase__ = scheduler.step(_A , _A , _A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCamelCase ( self : Dict ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_A )
def _UpperCamelCase ( self : Dict ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_A )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_A )
lowerCamelCase__ = 1
scheduler.set_timesteps(_A )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_A ):
# 1. scale model input
lowerCamelCase__ = scheduler.scale_model_input(_A , _A )
# 2. predict noise residual
lowerCamelCase__ = model(_A , _A )
# 3. predict previous sample x_t-1
lowerCamelCase__ = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_A ) )
lowerCamelCase__ = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1e-2
assert abs(result_mean.item() - 0.25_10 ) < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_A )
lowerCamelCase__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_A )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowerCamelCase__ = scheduler.scale_model_input(_A , _A )
# 2. predict noise residual
lowerCamelCase__ = model(_A , _A )
# 3. predict previous sample x_t-1
lowerCamelCase__ = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_A ) )
lowerCamelCase__ = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1e-2
assert abs(result_mean.item() - 0.45_27 ) < 1e-3
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_A )
lowerCamelCase__ = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_A )
lowerCamelCase__ = [39, 30, 12, 1, 0]
lowerCamelCase__ = len(_A )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_A )
lowerCamelCase__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A )
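

# Illustrative only: consistency-model sampling typically runs a short, strictly
# descending custom timestep schedule, like the two-step [106, 0] case exercised
# above. A minimal stand-alone sketch (values chosen arbitrarily):
if __name__ == "__main__":
    demo_scheduler = CMStochasticIterativeScheduler(
        num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
    )
    demo_scheduler.set_timesteps(timesteps=[39, 0])  # must be strictly descending
    print(demo_scheduler.timesteps)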
| 714 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def snake_case ( _a: Any )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = test_results.split(' ' )
lowerCamelCase__ = 0
lowerCamelCase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def snake_case ( _a: Optional[int] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {}
lowerCamelCase__ = None
lowerCamelCase__ = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _a ):
lowerCamelCase__ = True
lowerCamelCase__ = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
lowerCamelCase__ = line
lowerCamelCase__ = False
return failures
class _a :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = title
lowerCamelCase__ = doc_test_results['time_spent'].split(',' )[0]
lowerCamelCase__ = doc_test_results['success']
lowerCamelCase__ = doc_test_results['failures']
lowerCamelCase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ = doc_test_results
@property
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = [self._time_spent]
lowerCamelCase__ = 0
for time in time_spent:
lowerCamelCase__ = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
lowerCamelCase__ = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def _UpperCamelCase ( self : Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = 40
lowerCamelCase__ = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
lowerCamelCase__ = ''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( ):
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[int] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
lowerCamelCase__ = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
lowerCamelCase__ = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = ''
for key, value in failures.items():
lowerCamelCase__ = value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += F'*{key}*\n_{value}_\n\n'
lowerCamelCase__ = job_name
lowerCamelCase__ = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
lowerCamelCase__ = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self : Optional[int] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
lowerCamelCase__ = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
lowerCamelCase__ = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
lowerCamelCase__ = F'*Num failures* :{len(job_result["failed"] )} \n'
lowerCamelCase__ = job_result['failures']
lowerCamelCase__ = self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = os.environ['GITHUB_RUN_ID']
lowerCamelCase__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
lowerCamelCase__ = requests.get(_a ).json()
lowerCamelCase__ = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
lowerCamelCase__ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_a ):
lowerCamelCase__ = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _a )
return {}
def snake_case ( _a: str )-> Dict:
'''simple docstring'''
lowerCamelCase__ = {}
if os.path.exists(_a ):
lowerCamelCase__ = os.listdir(_a )
for file in files:
try:
with open(os.path.join(_a , _a ) , encoding='utf-8' ) as f:
lowerCamelCase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(_a , _a )}.' ) from e
return _artifact
def snake_case ( )-> Optional[int]:
'''simple docstring'''
class _a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = name
lowerCamelCase__ = []
def __str__( self : Dict ):
return self.name
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
self.paths.append({'name': self.name, 'path': path} )
lowerCamelCase__ = {}
lowerCamelCase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ = Artifact(_a )
_available_artifacts[artifact_name].add_path(_a )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 659 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str = "cpu" , SCREAMING_SNAKE_CASE__ : str = "openai/clip-vit-large-patch14" ):
lowerCamelCase__ = device
lowerCamelCase__ = CLIPTokenizerFast.from_pretrained(__UpperCamelCase )
lowerCamelCase__ = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
lowerCamelCase__ = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
lowerCamelCase__ = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCamelCase__ = torchvision.transforms.Resize(2_24 )
lowerCamelCase__ = torchvision.transforms.CenterCrop(2_24 )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.resize(__UpperCamelCase )
lowerCamelCase__ = self.center_crop(__UpperCamelCase )
lowerCamelCase__ = self.normalize(__UpperCamelCase )
return images
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer(text=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase__ = self.preprocess_img(__UpperCamelCase )
lowerCamelCase__ = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.01 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="image" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , ):
super().__init__()
lowerCamelCase__ = None
lowerCamelCase__ = device if device else get_device()
if vqgan:
lowerCamelCase__ = vqgan
else:
lowerCamelCase__ = load_vqgan(self.device , conf_path=__UpperCamelCase , ckpt_path=__UpperCamelCase )
self.vqgan.eval()
if clip:
lowerCamelCase__ = clip
else:
lowerCamelCase__ = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
lowerCamelCase__ = ProcessorGradientFlow(device=self.device )
lowerCamelCase__ = iterations
lowerCamelCase__ = lr
lowerCamelCase__ = log
lowerCamelCase__ = make_grid
lowerCamelCase__ = return_val
lowerCamelCase__ = quantize
lowerCamelCase__ = self.vqgan.decoder.z_shape
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : int=True ):
lowerCamelCase__ = []
if output_path is None:
lowerCamelCase__ = './animation.gif'
if input_path is None:
lowerCamelCase__ = self.save_path
lowerCamelCase__ = sorted(glob(input_path + '/*' ) )
if not len(__UpperCamelCase ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(__UpperCamelCase ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
lowerCamelCase__ = total_duration / len(__UpperCamelCase )
lowerCamelCase__ = [frame_duration] * len(__UpperCamelCase )
if extend_frames:
lowerCamelCase__ = 1.5
lowerCamelCase__ = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(__UpperCamelCase ) )
imageio.mimsave(__UpperCamelCase , __UpperCamelCase , duration=__UpperCamelCase )
print(F'gif saved to {output_path}' )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
lowerCamelCase__ = preprocess(Image.open(__UpperCamelCase ) , target_image_size=2_56 ).to(self.device )
lowerCamelCase__ = preprocess_vqgan(__UpperCamelCase )
lowerCamelCase__ , *lowerCamelCase__ = self.vqgan.encode(__UpperCamelCase )
return z
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.latent.detach().requires_grad_()
lowerCamelCase__ = base_latent + transform_vector
if self.quantize:
lowerCamelCase__ , *lowerCamelCase__ = self.vqgan.quantize(__UpperCamelCase )
else:
lowerCamelCase__ = trans_latent
return self.vqgan.decode(__UpperCamelCase )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None ):
lowerCamelCase__ = self.clip_preprocessor(text=__UpperCamelCase , images=__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase )
lowerCamelCase__ = self.clip(**__UpperCamelCase )
lowerCamelCase__ = clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase__ = similarity_logits * weights
return similarity_logits.sum()
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self._get_clip_similarity(pos_prompts['prompts'] , __UpperCamelCase , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
lowerCamelCase__ = self._get_clip_similarity(neg_prompts['prompts'] , __UpperCamelCase , weights=neg_prompts['weights'] )
else:
lowerCamelCase__ = torch.tensor([1] , device=self.device )
lowerCamelCase__ = -torch.log(__UpperCamelCase ) + torch.log(__UpperCamelCase )
return loss
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = torch.randn_like(self.latent , requires_grad=__UpperCamelCase , device=self.device )
lowerCamelCase__ = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCamelCase__ = self._add_vector(__UpperCamelCase )
lowerCamelCase__ = loop_post_process(__UpperCamelCase )
lowerCamelCase__ = self._get_CLIP_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
print('CLIP loss' , __UpperCamelCase )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=__UpperCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
wandb.init(reinit=__UpperCamelCase , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
lowerCamelCase__ = Image.open(__UpperCamelCase )
lowerCamelCase__ = image.resize((2_56, 2_56) )
            wandb.log({'Original Image': wandb.Image(__UpperCamelCase )} )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if not prompts:
return []
lowerCamelCase__ = []
lowerCamelCase__ = []
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase__ = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(__UpperCamelCase , (tuple, list) ):
lowerCamelCase__ = prompt[0]
lowerCamelCase__ = float(prompt[1] )
elif ":" in prompt:
lowerCamelCase__ , lowerCamelCase__ = prompt.split(':' )
lowerCamelCase__ = float(__UpperCamelCase )
else:
lowerCamelCase__ = prompt
lowerCamelCase__ = 1.0
processed_prompts.append(__UpperCamelCase )
weights.append(__UpperCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__UpperCamelCase , device=self.device ),
}
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ):
if image_path:
lowerCamelCase__ = self._get_latent(__UpperCamelCase )
else:
lowerCamelCase__ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase__ = self.process_prompts(__UpperCamelCase )
lowerCamelCase__ = self.process_prompts(__UpperCamelCase )
if save_final and save_path is None:
lowerCamelCase__ = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
else:
lowerCamelCase__ = save_path + '_' + get_timestamp()
os.makedirs(__UpperCamelCase )
lowerCamelCase__ = save_path
lowerCamelCase__ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(__UpperCamelCase ) )
lowerCamelCase__ = loop_post_process(__UpperCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ):
if show_intermediate:
show_pil(__UpperCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'Image': wandb.Image(__UpperCamelCase )} )
if show_final:
show_pil(__UpperCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
| 715 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class _a ( BaseOutput ):
a_ : Union[PIL.Image.Image, np.ndarray]
class _a ( DiffusionPipeline ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
if latents is None:
lowerCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase__ = latents.to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Dict ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCamelCase__ = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.image_encoder(SCREAMING_SNAKE_CASE__ )['last_hidden_state']
lowerCamelCase__ = image_embeds[:, 1:, :].contiguous() # batch_size, 256, dim
lowerCamelCase__ = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 25 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ):
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowerCamelCase__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}' )
lowerCamelCase__ = self._execution_device
lowerCamelCase__ = batch_size * num_images_per_prompt
lowerCamelCase__ = guidance_scale > 1.0
lowerCamelCase__ = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.scheduler.timesteps
lowerCamelCase__ = self.prior.config.num_embeddings
lowerCamelCase__ = self.prior.config.embedding_dim
lowerCamelCase__ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase__ = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
lowerCamelCase__ , lowerCamelCase__ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
lowerCamelCase__ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase__ = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
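# A minimal sketch of the classifier-free guidance mixing step used in
# `__call__` above, on dummy tensors (shapes and the guidance scale are
# hypothetical, for illustration only):
#
#     import torch
#
#     noise_pred = torch.randn(2, 16, 32)  # unconditional and conditional halves stacked on dim 0
#     guidance_scale = 4.0
#     noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
#     guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)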
| 659 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = ["""pixel_values"""]
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : str = True , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : List[Any] = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Tuple = True , SCREAMING_SNAKE_CASE__ : List[Any] = 1 / 2_55 , SCREAMING_SNAKE_CASE__ : Dict = True , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Any = True , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = size if size is not None else {'height': 3_84, 'width': 3_84}
lowerCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = resample
lowerCamelCase__ = do_rescale
lowerCamelCase__ = rescale_factor
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase__ = do_convert_rgb
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Tuple = None , **SCREAMING_SNAKE_CASE__ : Any , ):
lowerCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
lowerCamelCase__ = (size['height'], size['width'])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict = None , **SCREAMING_SNAKE_CASE__ : Any , ):
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict = None , **SCREAMING_SNAKE_CASE__ : Any , ):
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : Union[str, Any] = None , SCREAMING_SNAKE_CASE__ : Optional[Any] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : str = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ = resample if resample is not None else self.resample
lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ = image_std if image_std is not None else self.image_std
lowerCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__ = size if size is not None else self.size
lowerCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__ = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
lowerCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
lowerCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
lowerCamelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCamelCase__ = BatchFeature(data={'pixel_values': images} , tensor_type=SCREAMING_SNAKE_CASE__ )
return encoded_outputs
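# A minimal numpy sketch of the transform order `preprocess` applies after
# resizing (rescale -> normalize -> channel-first); the input array below is a
# hypothetical stand-in for a decoded RGB image:
#
#     import numpy as np
#
#     image = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)
#     image = image * (1 / 255)                                                 # rescale to [0, 1]
#     image = (image - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)  # normalize
#     image = image.transpose(2, 0, 1)                                          # HWC -> CHW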
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = VOCAB_FILES_NAMES
a_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] = ['input_ids', 'attention_mask']
a_ : Union[str, Any] = NllbTokenizer
a_ : List[int] = []
a_ : List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
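# Illustration of the two special-token layouts configured above (token names
# shown instead of ids for readability):
#
#   legacy_behaviour=True:   tokens ... </s> eng_Latn   (prefix empty, language code in the suffix)
#   legacy_behaviour=False:  eng_Latn tokens ... </s>   (language code in the prefix, only </s> in the suffix)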
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case ( _a: Optional[int] , _a: str )-> Tuple:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = position
lowerCamelCase__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase__ = []
for position in positions:
lowerCamelCase__ , lowerCamelCase__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_a )
return permissible_positions
def snake_case ( _a: int )-> Dict:
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def snake_case ( _a: Dict , _a: List[str] , _a: int )-> List[Any]:
'''simple docstring'''
if is_complete(_a ):
return True
for position in get_valid_pos(_a , len(_a ) ):
lowerCamelCase__ , lowerCamelCase__ = position
if board[y][x] == 0:
lowerCamelCase__ = curr + 1
if open_knight_tour_helper(_a , _a , curr + 1 ):
return True
lowerCamelCase__ = 0
return False
def snake_case ( _a: int )-> Dict:
'''simple docstring'''
lowerCamelCase__ = [[0 for i in range(_a )] for j in range(_a )]
for i in range(_a ):
for j in range(_a ):
lowerCamelCase__ = 1
if open_knight_tour_helper(_a , (i, j) , 1 ):
return board
lowerCamelCase__ = 0
lowerCamelCase__ = F'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(_a )
if __name__ == "__main__":
import doctest
doctest.testmod()
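# Example usage (illustrative): an open knight's tour exists on a 5x5 board,
# so the returned grid holds the visit order 1..25, while a 2x2 board admits
# no tour and raises ValueError:
#
#     board = open_knight_tour(5)
#     open_knight_tour(2)  # ValueError: Open Knight Tour cannot be performed on a board of size 2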
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=5_12 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Any=None , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCamelCase__ = input_mask.numpy()
lowerCamelCase__ , lowerCamelCase__ = input_mask.shape
lowerCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Any ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (TFBlipTextModel,) if is_tf_available() else ()
a_ : List[str] = False
a_ : Optional[Any] = False
a_ : Union[str, Any] = False
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = BlipTextModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase ( self : Tuple ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Tuple ):
pass
def _UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : Dict ):
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCamelCase ( self : List[Any] ):
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE__ )
| 659 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM errors on GPU.
# This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
# but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _a :
a_ : Tuple = PegasusConfig
a_ : List[Any] = {}
a_ : Union[str, Any] = "gelu"
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=13 , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=99 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : List[Any]=5 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Any=37 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , ):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = eos_token_id
lowerCamelCase__ = pad_token_id
lowerCamelCase__ = bos_token_id
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCamelCase__ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__ = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase__ = prepare_pegasus_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = 20
lowerCamelCase__ = model_class_name(lowerCamelCase__ )
lowerCamelCase__ = model.encode(inputs_dict['input_ids'] )
lowerCamelCase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCamelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__ = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCamelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCamelCase__ = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
lowerCamelCase__ = model.decode(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ):
lowerCamelCase__ = 20
lowerCamelCase__ = model_class_name(lowerCamelCase__ )
lowerCamelCase__ = model.encode(inputs_dict['input_ids'] )
lowerCamelCase__ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase__ = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__ = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCamelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCamelCase__ = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
lowerCamelCase__ = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
lowerCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def snake_case ( _a: Dict , _a: int , _a: List[Any] , _a: Tuple=None , _a: Optional[int]=None , )-> Dict:
'''simple docstring'''
if attention_mask is None:
lowerCamelCase__ = np.not_equal(_a , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCamelCase__ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _a ( __lowerCAmelCase , unittest.TestCase ):
a_ : Dict = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a_ : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a_ : Any = True
a_ : Optional[Any] = False
a_ : Tuple = False
a_ : Optional[int] = False
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = FlaxPegasusModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=lowerCamelCase__ )
def _UpperCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase__ = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self : Optional[int] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ = model_class(lowerCamelCase__ )
lowerCamelCase__ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
lowerCamelCase__ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCamelCase ( self : int ):
for model_class_name in self.all_model_classes:
lowerCamelCase__ = model_class_name.from_pretrained('google/pegasus-large' , from_pt=lowerCamelCase__ )
lowerCamelCase__ = np.ones((1, 1) )
lowerCamelCase__ = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@slow
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
lowerCamelCase__ = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
lowerCamelCase__ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCamelCase__ = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCamelCase__ = tokenizer(lowerCamelCase__ , return_tensors='np' , truncation=lowerCamelCase__ , max_length=5_12 , padding=lowerCamelCase__ )
lowerCamelCase__ = model.generate(**lowerCamelCase__ , num_beams=2 ).sequences
lowerCamelCase__ = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
assert tgt_text == decoded
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
_snake_case = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, "r", encoding="utf-8") as f:
_snake_case = json.load(f)
@require_torch
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ):
return FSMTTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = FSMTForConditionalGeneration.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
# note: this test is not testing the best performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
lowerCamelCase__ = F'facebook/wmt19-{pair}'
lowerCamelCase__ = self.get_tokenizer(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_model(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = bleu_data[pair]['''src''']
lowerCamelCase__ = bleu_data[pair]['''tgt''']
lowerCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' , truncation=SCREAMING_SNAKE_CASE__ , padding='longest' ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCamelCase__ = tokenizer.batch_decode(
SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = calculate_bleu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(scores['bleu'] , SCREAMING_SNAKE_CASE__ )
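# `calculate_bleu` comes from the local test `utils` module; a rough
# equivalent with sacrebleu (an assumed dependency, not imported here) is:
#
#     import sacrebleu
#
#     bleu = sacrebleu.corpus_bleu(decoded_sentences, [reference_sentences]).score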
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
a_ : List[str] = FunnelTokenizer
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : int = 2
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[Any]="<sep>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : Tuple="<cls>" , SCREAMING_SNAKE_CASE__ : Tuple="<mask>" , SCREAMING_SNAKE_CASE__ : Any="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : int="##" , **SCREAMING_SNAKE_CASE__ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , clean_text=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , wordpieces_prefix=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = do_lower_case
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
lowerCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
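# Illustration of `create_token_type_ids_from_sequences` above: Funnel gives
# the leading cls token its own token type (cls_token_type_id = 2) instead of 0:
#
#   tokens:     <cls> a1 a2 <sep> b1 b2 <sep>
#   type ids:     2   0  0   0    1  1   1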
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = size
# approximate the overall size of segment tree with given value
lowerCamelCase__ = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCamelCase__ = [0 for i in range(0 , 4 * size )]
lowerCamelCase__ = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return idx * 2
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ):
return idx * 2 + 1
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
if left_element == right_element:
lowerCamelCase__ = a[left_element - 1]
else:
lowerCamelCase__ = (left_element + right_element) // 2
self.build(self.left(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.build(self.right(SCREAMING_SNAKE_CASE__ ) , mid + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = max(
self.segment_tree[self.left(SCREAMING_SNAKE_CASE__ )] , self.segment_tree[self.right(SCREAMING_SNAKE_CASE__ )] )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
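# assign `val` to every position in [a, b]; updates to children are deferred
# through the lazy array and applied when a child is next visited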
if self.flag[idx] is True:
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = False
if left_element != right_element:
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = True
lowerCamelCase__ = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCamelCase__ = val
if left_element != right_element:
lowerCamelCase__ = val
lowerCamelCase__ = val
lowerCamelCase__ = True
lowerCamelCase__ = True
return True
lowerCamelCase__ = (left_element + right_element) // 2
self.update(self.left(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.update(self.right(SCREAMING_SNAKE_CASE__ ) , mid + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = max(
self.segment_tree[self.left(SCREAMING_SNAKE_CASE__ )] , self.segment_tree[self.right(SCREAMING_SNAKE_CASE__ )] )
return True
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if self.flag[idx] is True:
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = False
if left_element != right_element:
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = self.lazy[idx]
lowerCamelCase__ = True
lowerCamelCase__ = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCamelCase__ = (left_element + right_element) // 2
lowerCamelCase__ = self.query(self.left(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.query(self.right(SCREAMING_SNAKE_CASE__ ) , mid + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __str__( self : str ):
return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_snake_case = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_snake_case = 15
_snake_case = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
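# Expected output of the demo above (derived by hand from `A` and the two
# range assignments):
#   7    max of A[4..6]  = max(7, 3, -5)
#   14   max of A[7..11] = max(6, 11, -20, 9, 14)
#   15   max of A[7..12] = max(6, 11, -20, 9, 14, 15)
#   111  max of A[1..15] after assigning 111 to the range [1, 3]
#   [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]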
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def snake_case ( _a: Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = [-1] * len(_a )
def dfs(_a: Any , _a: Optional[int] ):
lowerCamelCase__ = True
lowerCamelCase__ = c
for u in graph[v]:
if not visited[u]:
dfs(_a , 1 - c )
for i in range(len(_a ) ):
if not visited[i]:
dfs(_a , 0 )
for i in range(len(_a ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
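# A graph containing an odd cycle is not bipartite; with a triangle
# (illustrative adjacency list) the same check returns False:
#
#     triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#     print(check_bipartite_dfs(triangle))  # False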
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
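

# For reference, the machinery wired up above amounts to installing a proxy
# module in sys.modules that imports submodules on first attribute access.
# A toy version (illustrative only, not the real transformers._LazyModule):
#
#     import importlib
#     import types
#
#     class ToyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported symbol to the submodule that defines it
#             self._symbol_to_module = {
#                 symbol: module
#                 for module, symbols in import_structure.items()
#                 for symbol in symbols
#             }
#
#         def __getattr__(self, name):
#             module_name = self._symbol_to_module.get(name)
#             if module_name is None:
#                 raise AttributeError(name)
#             submodule = importlib.import_module("." + module_name, self.__name__)
#             return getattr(submodule, name)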
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
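

# Quick usage sketch for the map above (runnable as-is):
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8)
    hm["a"] = 1
    hm["b"] = 2
    del hm["a"]  # leaves a tombstone, so the probe chain to "b" stays intact
    assert list(hm) == ["b"] and hm["b"] == 2
    print(hm)  # HashMap(b: 2)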
| 659 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer with source/target language-code handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Tuple=False , **SCREAMING_SNAKE_CASE__ : str , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCamelCase__ = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ = src_lang if src_lang is not None else 'eng_Latn'
lowerCamelCase__ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCamelCase ( self : str ):
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ = src_lang
lowerCamelCase__ = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = tgt_lang_id
return inputs
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : Dict , ):
lowerCamelCase__ = src_lang
lowerCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
lowerCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
lowerCamelCase__ = []
lowerCamelCase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ = [self.cur_lang_code]
lowerCamelCase__ = [self.eos_token_id]
lowerCamelCase__ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
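

# Typical use of the language-code machinery above (the checkpoint is the
# public facebook/nllb-200-distilled-600M; exact token ids depend on it):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#
# In the default (non-legacy) mode the source ids come out as
# [eng_Latn, ..., </s>], and set_tgt_lang_special_tokens swaps the wrapper to
# fra_Latn for the target side; legacy_behaviour=True instead places the
# language code after </s>.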
| 700 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
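
    # Worked run of the recurrence dp[i] = sum(dp[i - x] for x in array) with
    # dp[0] = 1, for array = [1, 2, 5] and target = 5 (order matters, so 1+2
    # and 2+1 count separately):
    #   dp[1] = 1   (1)
    #   dp[2] = 2   (1+1, 2)
    #   dp[3] = 3   (1+1+1, 1+2, 2+1)
    #   dp[4] = 5   (dp[3] + dp[2])
    #   dp[5] = 9   (dp[4] + dp[3] + dp[0]) -- the value printed above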
| 659 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept out of ClassVar so it shows up in `asdict` for serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
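

# Sketch of how this template is consumed (dataset name is illustrative, and
# the task API is deprecated in newer datasets releases):
#
#     from datasets import load_dataset
#     squad = load_dataset("squad", split="train")
#     prepared = squad.prepare_for_task("question-answering-extractive")
#
# After the call the columns are renamed/cast to the schema declared above:
# "question", "context" and an "answers" sequence of text/answer_start.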
| 702 |
"""simple docstring"""
def snake_case ( _a: list[list[float]] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for data in source_data:
for i, el in enumerate(_a ):
if len(_a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_a ) )
return data_lists
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = []
for dlist, weight in zip(_a , _a ):
lowerCamelCase__ = min(_a )
lowerCamelCase__ = max(_a )
lowerCamelCase__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCamelCase__ = F'Invalid weight of {weight:f} provided'
raise ValueError(_a )
score_lists.append(_a )
return score_lists
def snake_case ( _a: list[list[float]] )-> list[float]:
'''simple docstring'''
lowerCamelCase__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_a ):
lowerCamelCase__ = final_scores[j] + ele
return final_scores
def snake_case ( _a: list[list[float]] , _a: list[int] )-> list[list[float]]:
'''simple docstring'''
lowerCamelCase__ = get_data(_a )
lowerCamelCase__ = calculate_each_score(_a , _a )
lowerCamelCase__ = generate_final_scores(_a )
# append scores to source data
for i, ele in enumerate(_a ):
source_data[i].append(_a )
return source_data
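

# Worked example: rows are [price, rating]; weight 0 means "lower is better"
# (score inverted), weight 1 means "higher is better". Min-max scaling gives
# price scores [0.5, 1.0, 0.0] and rating scores [0.5, 1.0, 0.0], so:
#
#     procentual_proximity([[20, 60], [10, 70], [30, 50]], [0, 1])
#     -> [[20, 60, 1.0], [10, 70, 2.0], [30, 50, 0.0]]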
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , ):
lowerCamelCase__ = parent
lowerCamelCase__ = 13
lowerCamelCase__ = 7
lowerCamelCase__ = 30
lowerCamelCase__ = self.seq_length + self.mem_len
lowerCamelCase__ = 15
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 99
lowerCamelCase__ = [10, 50, 80]
lowerCamelCase__ = 32
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = 8
lowerCamelCase__ = 1_28
lowerCamelCase__ = 2
lowerCamelCase__ = 2
lowerCamelCase__ = None
lowerCamelCase__ = 1
lowerCamelCase__ = 0
lowerCamelCase__ = 3
lowerCamelCase__ = self.vocab_size - 1
lowerCamelCase__ = 0.01
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCamelCase ( self : Union[str, Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCamelCase__ = TFTransfoXLModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'mems': mems_a}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'labels': lm_labels}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
lowerCamelCase__ , lowerCamelCase__ = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
lowerCamelCase__ , lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : List[Any] ):
lowerCamelCase__ = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs
lowerCamelCase__ = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
a_ : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
a_ : int = () if is_tf_available() else ()
a_ : List[Any] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
a_ : Optional[Any] = False
a_ : Dict = False
a_ : Any = False
a_ : Any = False
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = TFTransfoXLModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , d_embed=37 )
def _UpperCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[str] ):
self.model_tester.set_seed()
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
self.model_tester.set_seed()
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase__ = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer )
lowerCamelCase__ = model.get_bias()
assert name is None
else:
lowerCamelCase__ = model.get_output_embeddings()
assert x is None
lowerCamelCase__ = model.get_bias()
assert name is None
def _UpperCamelCase ( self : int ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def _UpperCamelCase ( self : str ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def _UpperCamelCase ( self : Dict ):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
lowerCamelCase__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE__ )
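

# The "mems" round-trip exercised in these tests is Transformer-XL's
# segment-level recurrence: every forward pass returns, per layer, a cache of
# shape [mem_len, batch, hidden] that the next call attends over. Sketch:
#
#     outputs_1 = model(input_ids_1)                       # fresh mems
#     outputs_2 = model(input_ids_2, mems=outputs_1.mems)  # sees mem_len extra positions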
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho, or None on failure."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
_snake_case = parser.parse_args()
_snake_case = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
_snake_case = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 byte values to printable unicode strings, avoiding
    the whitespace/control characters that byte-level BPE vocabularies cannot contain.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
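

# For example, byte 0x20 (space) falls outside the printable ranges kept above,
# so it is remapped to chr(256 + 32) == "Ġ"; that is why GPT-2/LED merge files
# show "Ġ" wherever a token begins with a space.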
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (derived from the BART tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int="replace" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : Dict="<unk>" , SCREAMING_SNAKE_CASE__ : str="<pad>" , SCREAMING_SNAKE_CASE__ : int="<mask>" , SCREAMING_SNAKE_CASE__ : Any=False , **SCREAMING_SNAKE_CASE__ : List[str] , ):
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as vocab_handle:
lowerCamelCase__ = json.load(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ = errors # how to handle errors in decoding
lowerCamelCase__ = bytes_to_unicode()
lowerCamelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as merges_handle:
lowerCamelCase__ = merges_handle.read().split('\n' )[1:-1]
lowerCamelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowerCamelCase__ = {}
lowerCamelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCamelCase ( self : Dict ):
return len(self.encoder )
def _UpperCamelCase ( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int ):
if token in self.cache:
return self.cache[token]
lowerCamelCase__ = tuple(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
lowerCamelCase__ = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ = bigram
lowerCamelCase__ = []
lowerCamelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
lowerCamelCase__ = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ = tuple(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
lowerCamelCase__ = get_pairs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ' '.join(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = word
return word
def _UpperCamelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCamelCase__ = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(' ' ) )
return bpe_tokens
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] ):
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCamelCase__ = ''.join(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '\n' )
lowerCamelCase__ = 0
with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase__ = token_index
writer.write(' '.join(SCREAMING_SNAKE_CASE__ ) + '\n' )
index += 1
return vocab_file, merge_file
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=False , **SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
lowerCamelCase__ = ' ' + text
return (text, kwargs)
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[Dict[str, EncodedInput], BatchEncoding] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ):
lowerCamelCase__ = super()._pad(
encoded_inputs=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ = len(encoded_inputs['global_attention_mask'] ) != len(SCREAMING_SNAKE_CASE__ )
if needs_to_be_padded:
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
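

# Sketch of the global_attention_mask convention handled by _pad above: the
# caller marks globally-attending tokens with 1 and local ones with 0, and the
# padding added here uses -1 so padded positions are never read as "attend
# locally". E.g. padding an 8-token input, whose first token is global, to 12:
#     caller sets    [1, 0, 0, 0, 0, 0, 0, 0]
#     after padding  [1, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1]   (padding_side="right")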
| 704 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 705 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we cannot expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
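
    # Note on the heuristic: plain Manhattan distance never overestimates the
    # true remaining cost on a 4-connected grid with unit step cost, so the
    # search is admissible A*; the 99 written onto obstacle cells is harmless
    # because blocked cells are filtered out by the grid check and their
    # heuristic values are never read.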
| 659 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case ( _a: Dict , _a: int )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for part_id in partition_order:
lowerCamelCase__ = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(_a ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(100 ).repartition(1 )
lowerCamelCase__ = Spark(_a )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(10 ).repartition(2 )
lowerCamelCase__ = [1, 0]
lowerCamelCase__ = _generate_iterable_examples(_a , _a ) # Reverse the partitions.
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , _a )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(10 ).repartition(1 )
lowerCamelCase__ = SparkExamplesIterable(_a )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_a ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
lowerCamelCase__ = lambda _a : x.reverse()
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [2, 1, 0] )
lowerCamelCase__ = SparkExamplesIterable(_a ).shuffle_data_sources(_a )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_a ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0, num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1, num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
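

# Illustrative sketch (not part of the original test suite): the shard tests above
# rely on round-robin partition assignment, i.e. worker ``w`` of ``n`` workers reads
# partitions w, w + n, w + 2n, ... The helper below mirrors that assumption.
def _expected_partitions_for_worker(worker_id: int, num_workers: int, num_partitions: int) -> list:
    return list(range(worker_id, num_partitions, num_workers))


assert _expected_partitions_for_worker(0, 2, 4) == [0, 2]
assert _expected_partitions_for_worker(1, 2, 4) == [1, 3]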
"""simple docstring"""
def solution(n: int = 4000000 )-> int:
    '''Return the sum of all even Fibonacci numbers that do not exceed n.'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
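

# Alternative sketch (assumption: the standard identity for even Fibonacci numbers,
# E(k) = 4 * E(k - 1) + E(k - 2)): the even terms can be summed directly without
# materialising the whole sequence.
def solution_by_even_recurrence(n: int = 4000000 )-> int:
    even_a, even_b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while even_a <= n:
        total += even_a
        even_a, even_b = even_b, 4 * even_b + even_a
    return total


assert solution_by_even_recurrence() == solution()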
if __name__ == "__main__":
print(f"""{solution() = }""")
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list )-> list:
    '''Sort a sequence of 0s, 1s and 2s in place in a single pass (three-way partition).'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
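

# Quick illustrative check: one pass, three pointers, sorted in place.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1] ) == [0, 0, 1, 1, 2, 2]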
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input("Enter numbers separated by commas:\n").strip()
_snake_case = [int(item.strip()) for item in user_input.split(",")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
"""simple docstring"""
def bfs(graph, s, t, parent )-> bool:
    '''Return True if there is an augmenting path from source s to sink t.'''
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True

    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink ):
    '''Compute the maximum flow using the BFS (Edmonds-Karp) variant of Ford-Fulkerson.'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph, source, sink, parent ):
        path_flow = float('Inf' )
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s] )
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
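
# Sanity sketch: the classic CLRS network above has max flow 23, so the script prints 23.
# A second, hand-checkable instance (2 units via 0->1->3 plus 2 units via 0->2->3):
small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(small_graph, 0, 3) == 4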
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask )

    def get_config(self ):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask ):
        model = TFBlipTextModel(config=config )
        result = model(input_ids, attention_mask=input_mask, training=False )
        result = model(input_ids, training=False )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self ):
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_training(self ):
        pass

    def test_training_gradient_checkpointing(self ):
        pass

    @unittest.skip(reason='Blip does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_from_base(self ):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
    def test_save_load_fast_init_to_base(self ):
        pass

    @slow
    def test_model_from_pretrained(self ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset ):
    """Tokenize and preprocess the dataset, and yield each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria ):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs ):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block(string ):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ), string )
    # last string should be ""
    return ''.join(string_list[:-2] )
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs ):
    """Generate multiple codes for each task in the dataset."""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens ):
                gen_token_dict[task].append(generated_tokens )

    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True )

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )

    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer )] ),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval' )
    code_eval_metric = load_metric('code_eval' )

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1 )

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader )

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['test'][task]['test']
            entry_point = F'check({human_eval["test"][task]["entry_point"]})'
            references.append('\n' + test_func + '\n' + entry_point )

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers )
        print(F'Results: {pass_at_k}' )

        # Save results to json file
        with open(args.output_file, 'w' ) as fp:
            json.dump(pass_at_k, fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command(args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
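
# Typical invocation (assumption: `accelerate` is installed and configured):
#
#   accelerate test --config_file=/path/to/default_config.yaml
#
# which routes through ``main()`` above and launches ``test_script.py`` via
# `accelerate-launch`, printing a success message when the subprocess exits cleanly.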
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main()-> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r' ) as src_file, open(args.evaluation_set, 'w' ) as eval_file, open(
        args.gold_data_path, 'w' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts ) + '\n' )
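
# Expected input shape (assumption: standard DPR biencoder JSON): each record looks
# like {"question": "...", "positive_ctxs": [{"title": "...", ...}, ...]}; the script
# writes one question per line to --evaluation_set and the tab-joined titles of its
# positive contexts, line-aligned with the questions, to --gold_data_path.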
if __name__ == "__main__":
main()
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
_snake_case = TypeVar("T")
_snake_case = Union[List[T], Tuple[T, ...]]
_snake_case = Union[T, List[T], Dict[str, T]]
_snake_case = Union[str, bytes, os.PathLike]
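

# Usage sketch: these aliases document acceptable argument shapes, e.g. a parameter
# annotated with NestedDataStructureLike[int] accepts 3, [3, 4], or {"a": 3} alike.
if __name__ == "__main__":
    def _first_value(data: NestedDataStructureLike[int]) -> int:
        if isinstance(data, dict):
            return next(iter(data.values()))
        if isinstance(data, list):
            return data[0]
        return data

    assert _first_value(3) == _first_value([3, 4]) == _first_value({"a": 3}) == 3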
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
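
# With the lazy structure above, ``import transformers.models.blenderbot`` stays cheap:
# heavy backends (torch/tf/flax) are only imported when one of their symbols, e.g.
# BlenderbotForConditionalGeneration, is first accessed through the lazy module.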
def print_max_activities(start: list[int], finish: list[int] )-> None:
    '''Greedily print a maximum-size set of non-overlapping activities.'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i, end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',' )
            i = j
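

# Note (assumption carried by the greedy proof): the selection above is optimal only
# when ``finish`` is sorted in non-decreasing order, as in the sample data below;
# for start=[1, 3, 0, 5, 8, 5] and finish=[2, 4, 6, 7, 9, 9] it prints 0,1,3,4,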
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = [1, 3, 0, 5, 8, 5]
_snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig ):
    model_type = 'swinv2'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
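
# Instantiation sketch (assumption: defaults mirror microsoft/swinv2-tiny-patch4-window8-256):
# with the values above, hidden_size = 96 * 2 ** (4 - 1) = 768, the channel width after
# the last stage, which is what VisionEncoderDecoderModel reads from the config.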
"""simple docstring"""
_snake_case = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
def factorial(num: int )-> int:
    '''Return num!.'''
    fact = 1
    for i in range(1, num + 1 ):
        fact *= i
    return fact


def split_and_add(number: int )-> int:
    '''Return the sum of the decimal digits of ``number``.'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100 )-> int:
    '''Return the digit sum of num! (Project Euler problem 20).'''
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
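

# Hand-checkable case: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10 ) == 27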
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str )-> nn.Module:
    '''Map an activation-function name from a config to the corresponding module.'''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'Unsupported activation function: {act_fn}' )
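

if __name__ == "__main__":
    # Usage sketch: map config strings to activation modules once, at model build time.
    assert isinstance(get_activation("swish" ), nn.SiLU )
    assert isinstance(get_activation("gelu" ), nn.GELU )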
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str] )-> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash


def get_tokens(code: str )-> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__(self, *,
        duplication_jaccard_threshold: float = 0.85, ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm )

        self._duplicate_clusters = defaultdict(set )

    def add(self, code_key: Tuple, min_hash: MinHash )-> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return

        self._index.insert(code_key, min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters(self )-> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save(self, filepath )-> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w' ) as f:
            json.dump(duplicate_clusters, f )
def _compute_min_hash(element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000 ), chunksize=100, ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float ):
    """Find duplicate clusters in the dataset, streaming MinHashes into the index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ), max_queue_size=100 ) ):
        di.add(filename, min_hash )

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str )-> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold ):
    """Reduce a cluster to its "extreme" files, counting copies of each extreme."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold ):
    """Call _find_cluster_extremes_shared on each cluster, sharing the dataset globally."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list ), ):
            extremes_list.append(extremes )
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85 )-> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset, keeping one "extreme" representative per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold )
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True )

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element["is_extreme"]:
                element['copies'] = extreme_dict[element['base_index']]['copies']

    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )

    return ds_filter, duplicate_clusters
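

if __name__ == "__main__":
    # Quick sanity sketch for the token-level Jaccard used above: identical token
    # sets score 1.0 and disjoint ones 0.0.
    assert jaccard_similarity('def add(a, b): return a + b', 'def add(a, b): return a + b' ) == 1.0
    assert jaccard_similarity('foo bar', 'baz qux' ) == 0.0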
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
_snake_case = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self, key, getattr(module, key ) )
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj ) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self ):
        *submodules, target_attr = self.target.split('.' )

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs ) )
                    patched = getattr(self.obj, attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None ), attrs=self.attrs ) )
                        patched = getattr(patched, key )
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new )

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ), target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr ) is attr_value:
                    self.original[attr] = getattr(self.obj, attr )
                    setattr(self.obj, attr, self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new )
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
    def __exit__(self, *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj, attr, self.original.pop(attr ) )

    def start(self ):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self )

    def stop(self ):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
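

# Usage sketch (assumption: typical call sites inside the datasets codebase):
#
#   with patch_submodule(some_module, "os.path.join", mocked_join):
#       ...  # some_module sees the mock, even if it did `from os.path import join`
#
# ``start()`` / ``stop()`` offer the same patching without a ``with`` block.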
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results ):
    expressions = test_results.split(' ' )
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )

    return failed, success, time_spent
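

# Parsing sketch (hypothetical pytest summary string):
#   handle_test_results('= 2 failed, 30 passed in 1:00:05 =') -> (2, 30, '1:00:05')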
def extract_first_line_failure(failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]', line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self ):
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(':' )

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return F'{int(hours )}h{int(minutes )}m{int(seconds )}s'
@property
    def header(self ):
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self ):
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict )}

        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'The following examples had failures:\n\n\n{report}\n',
            },
        }
@property
    def payload(self ):
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures )

        if self.n_failures > 0:
            blocks.extend([self.category_failures] )

        if self.n_failures == 0:
            blocks.append(self.no_failures )

        return json.dumps(blocks )
@staticmethod
    def error_out():
lowerCamelCase__ = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
    def post(self ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )

        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'

        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += F'*{key}*\n_{value}_\n\n'

        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}

        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )

        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F'*Num failures* :{len(job_result["failed"] )} \n'
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text )

                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )

                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=F'Results for {job}', blocks=blocks, thread_ts=self.thread_ts['ts'], )

                time.sleep(1 )
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}

    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )

        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )

        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e )

    return {}
def retrieve_artifact(name: str ):
    _artifact = {}

    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name, file ), encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name, file )}.' ) from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str ):
            self.name = name
            self.paths = []

        def __str__(self ):
            return self.name

        def add_path(self, path: str ):
            self.paths.append({'name': self.name, 'path': path} )

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )

        _available_artifacts[artifact_name].add_path(directory )

    return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple="<s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowerCamelCase__ = len(self.sp_model ) - 1
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
    def vocab_size(self ):
return len(self.sp_model )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
return spm_id if spm_id else self.unk_token_id
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
lowerCamelCase__ = []
lowerCamelCase__ = ''
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __getstate__( self : str ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
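
# Usage sketch (assumption: the class above is BarthezTokenizer, published e.g. as
# moussaKam/barthez):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Un exemple en français.").input_ids  # <s> ... </s> framing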
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : PriorTransformer , SCREAMING_SNAKE_CASE__ : CLIPVisionModel , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , SCREAMING_SNAKE_CASE__ : HeunDiscreteScheduler , SCREAMING_SNAKE_CASE__ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler ):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ = torch.device(F'cuda:{gpu_id}' )
lowerCamelCase__ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
    def _execution_device(self ):
        if self.device != torch.device('meta' ) or not hasattr(self.image_encoder, '_hf_hook' ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook' )
                and hasattr(module._hf_hook, 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance, ):
        if isinstance(image, List ) and isinstance(image[0], torch.Tensor ):
            image = torch.cat(image, axis=0 ) if image[0].ndim == 4 else torch.stack(image, axis=0 )

        if not isinstance(image, torch.Tensor ):
            image = self.image_processor(image, return_tensors='pt' ).pixel_values[0].unsqueeze(0 )

        image = image.to(dtype=self.image_encoder.dtype, device=device )

        image_embeds = self.image_encoder(image )['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(_snake_case)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(F'Only the output types `pil` and `np` are supported, not output_type={output_type}')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        desired_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        desired_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 716 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
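    # NLLB special-token layout: by default the language code prefixes the sequence and </s> closes it;
    # in legacy mode the code is instead appended after </s>, kept for backward compatibility.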
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 659 | 0 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
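# Note: the bottom-up variant runs in O(n * target) time and O(target) space; the plain recursion
# above is exponential, and the dp_array version memoizes it down to O(n * target) as well.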
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True,
        use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1,
        max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass
    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 | 0 |
"""simple docstring"""
_snake_case = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 718 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659 | 0 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
_snake_case = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
| 719 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_snake_case = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_snake_case = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
_snake_case = {f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
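    # Funnel Transformer gives the [CLS] token its own token type id (2); regular segments use 0 and 1,
    # as reflected in create_token_type_ids_from_sequences below.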
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>",
        sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True,
        strip_accents=None, wordpieces_prefix="##", **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 659 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    '''simple docstring'''
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 0
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowerCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , flax_model.params )
lowerCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE__ ).sequences
lowerCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
lowerCamelCase__ = 0.8
lowerCamelCase__ = 10
lowerCamelCase__ = 0.3
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
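        # JAX arrays are immutable, so `.at[index].set(value)` returns an updated copy rather than
        # modifying the mask in place.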
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = 2
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        encoder_input_ids = tokenizer('Hello world', return_tensors='np').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(encoder_input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(encoder_input_ids, **fake_model_kwargs)
| 720 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
_snake_case = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
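# For contrast, an odd cycle can never be two-colored. The graph below is an
# added illustration (not part of the original module): a triangle, for which
# the check should print False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False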
| 659 | 0 |
"""simple docstring"""
def snake_case ( _a: List[Any] , _a: Any , _a: str , _a: List[Any] )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = [False] * len(_a )
lowerCamelCase__ = []
queue.append(_a )
lowerCamelCase__ = True
while queue:
lowerCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
lowerCamelCase__ = True
lowerCamelCase__ = u
return visited[t]
def snake_case ( _a: List[Any] , _a: str , _a: List[str] )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [-1] * (len(_a ))
lowerCamelCase__ = 0
while bfs(_a , _a , _a , _a ):
lowerCamelCase__ = float('Inf' )
lowerCamelCase__ = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ = min(_a , graph[parent[s]][s] )
lowerCamelCase__ = parent[s]
max_flow += path_flow
lowerCamelCase__ = sink
while v != source:
lowerCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
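# Note (added): this adjacency matrix is the classic flow-network example from
# CLRS; the maximum flow from vertex 0 to vertex 5 is 23, so the call above
# should print 23.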
| 721 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
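# Minimal usage sketch (added for illustration, not part of the original
# module): exercises insertion, lookup, deletion, and the automatic resizing
# of the map defined above.
if __name__ == '__main__':
    hash_map: HashMap[str, int] = HashMap(initial_block_size=4)
    for i, key in enumerate('abcdefgh'):
        hash_map[key] = i  # _size_up() fires once the 0.75 load factor is exceeded
    print(hash_map['c'])  # 2
    del hash_map['c']  # the bucket is marked with the _deleted sentinel
    print(len(hash_map))  # 7
    print(sorted(hash_map))  # ['a', 'b', 'd', 'e', 'f', 'g', 'h']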
| 659 | 0 |