"""A simple launcher script for TPU (torch_xla) distributed training."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
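
# Usage sketch for the launcher above (illustrative, assumes torch_xla is
# installed). The launcher is typically invoked as
#     python xla_spawn.py --num_cores 8 my_train.py --lr 1e-4
# where `my_train.py` is a made-up name for any script that exposes a
# module-level `_mp_fn(index)`, e.g. this minimal example:
import sys

import torch_xla.core.xla_model as xm


def _mp_fn(index):
    # Each spawned process enters here with its process ordinal.
    print(f"process {index} running on {xm.xla_device()} with argv {sys.argv[1:]}")


if __name__ == "__main__":
    _mp_fn(0)  # single-process smoke test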
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__(self , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__=1_25 , lowercase__=None , **lowercase__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ : Optional[Any] = [f'<extra_id_{i}>' for i in range(lowercase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ : int = len(set(filter(lambda lowercase__ : bool("""extra_id""" in str(lowercase__ ) ) , lowercase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
snake_case_ : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token
snake_case_ : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token
snake_case_ : Optional[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token
super().__init__(
eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , extra_ids=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , )
snake_case_ : List[str] = extra_ids
snake_case_ : Union[str, Any] = 2**8 # utf is 8 bits
# define special tokens dict
snake_case_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case_ : List[Any] = len(self.special_tokens_encoder )
snake_case_ : Tuple = len(lowercase__ )
for i, token in enumerate(lowercase__ ):
snake_case_ : Union[str, Any] = self.vocab_size + i - n
snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __UpperCamelCase (self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase__ )) + [1]
return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def __UpperCamelCase (self , lowercase__ ):
if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
snake_case_ : Union[str, Any] = self._add_eos_if_not_present(lowercase__ )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ : int = self._add_eos_if_not_present(lowercase__ )
return token_ids_a + token_ids_a
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : List[str] = [chr(lowercase__ ) for i in text.encode("""utf-8""" )]
return tokens
def __UpperCamelCase (self , lowercase__ ):
if token in self.special_tokens_encoder:
snake_case_ : Any = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case_ : Optional[int] = self.added_tokens_encoder[token]
elif len(lowercase__ ) != 1:
snake_case_ : Dict = self.unk_token_id
else:
snake_case_ : List[str] = ord(lowercase__ ) + self._num_special_tokens
return token_id
def __UpperCamelCase (self , lowercase__ ):
if index in self.special_tokens_decoder:
snake_case_ : Tuple = self.special_tokens_decoder[index]
else:
snake_case_ : Dict = chr(index - self._num_special_tokens )
return token
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[int] = B""""""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case_ : List[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
snake_case_ : Union[str, Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
snake_case_ : Optional[int] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
snake_case_ : Any = token.encode("""utf-8""" )
else:
snake_case_ : Dict = bytes([ord(lowercase__ )] )
bstring += tok_string
snake_case_ : List[Any] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def __UpperCamelCase (self , lowercase__ , lowercase__ = None ):
return ()
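
# A dependency-free sketch of the id scheme above: every UTF-8 byte maps to
# one id, offset by the three leading special tokens (<pad>=0, </s>=1,
# <unk>=2), mirroring `special_tokens_encoder`. The names here are
# illustrative, not part of the class.
NUM_SPECIAL_TOKENS = 3


def encode_bytes(text):
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]


def decode_bytes(ids):
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")


assert decode_bytes(encode_bytes("héllo")) == "héllo"  # "é" costs two ids, one per byte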
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image.Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
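
# Hedged usage sketch of the tool above (downloads the Donut checkpoint on
# first use; the image path and question are made up for illustration).
if __name__ == "__main__":
    from PIL import Image

    tool = DocumentQuestionAnsweringTool()
    document = Image.open("invoice.png")  # any document image
    print(tool(document, "What is the total amount?"))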
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
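
# Outside the test harness, the same tokenizer drives translation with the
# seq2seq model; a short sketch using the standard documented API (the text
# is illustrative):
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    encoded = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))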
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 inverts the score (lower raw value is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one aggregate score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
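
# Worked example: three vehicles scored on [price, comfort, year]. Weight 0
# means lower is better (price); weight 1 means higher is better.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 1, 1]))
    # each row gains a trailing aggregate score; the 2015 car scores highest (2.0)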
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
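
# Quick check of the class above on a small grid (8-connected components).
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 1],
        [0, 0, 0, 1],
        [1, 0, 1, 1],
    ]
    print(Matrix(len(grid), len(grid[0]), grid).count_islands())  # 3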
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
lowercase__ = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def __magic_name__ ( _lowerCamelCase : Any ):
__a : List[str] = {}
state_dict.pop("""pixel_mean""" , _lowerCamelCase )
state_dict.pop("""pixel_std""" , _lowerCamelCase )
__a : Any = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a : str = key.replace(_lowerCamelCase , _lowerCamelCase )
if re.match(_lowerCamelCase , _lowerCamelCase ):
__a : List[Any] = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(2 ) )
if layer_nb == 0:
__a : int = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
__a : Union[str, Any] = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
__a : Optional[int] = key.replace("""layers.2""" , """proj_out""" )
__a : Dict = value
__a : Optional[int] = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : int="ybelkada/segment-anything" ):
__a : int = hf_hub_download(_lowerCamelCase , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
__a : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
__a : Union[str, Any] = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
__a : Optional[Any] = SamConfig(
vision_config=_lowerCamelCase , )
elif "sam_vit_h" in model_name:
__a : Optional[int] = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
__a : Optional[int] = SamConfig(
vision_config=_lowerCamelCase , )
__a : str = torch.load(_lowerCamelCase , map_location="""cpu""" )
__a : Dict = replace_keys(_lowerCamelCase )
__a : Any = SamImageProcessor()
__a : Union[str, Any] = SamProcessor(image_processor=_lowerCamelCase )
__a : int = SamModel(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
__a : List[Any] = hf_model.to("""cuda""" )
__a : Tuple = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__a : str = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
__a : str = [[[4_0_0, 6_5_0]]]
__a : Any = [[1]]
__a : Optional[int] = processor(images=np.array(_lowerCamelCase ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a : Optional[Any] = hf_model(**_lowerCamelCase )
__a : Any = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
__a : Tuple = processor(
images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a : int = hf_model(**_lowerCamelCase )
__a : Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
__a : Union[str, Any] = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
__a : Optional[int] = processor(images=np.array(_lowerCamelCase ) , input_boxes=_lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a : Optional[int] = hf_model(**_lowerCamelCase )
__a : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
__a : int = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
__a : str = [[1, 1]]
__a : Dict = processor(
images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a : Optional[int] = hf_model(**_lowerCamelCase )
__a : int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
lowercase__ = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
lowercase__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
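
# The renaming above is just substring mapping plus one regex for the
# hypernetwork MLP layers; a standalone toy check with made-up keys:
if __name__ == "__main__":
    toy_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for toy_key in [
        "image_encoder.patch_embed.proj.weight",
        "mask_decoder.output_hypernetworks_mlps.3.layers.2.weight",
    ]:
        for old, new in KEYS_TO_MODIFY_MAPPING.items():
            if old in toy_key:
                toy_key = toy_key.replace(old, new)
        match = re.match(toy_pattern, toy_key)
        if match and int(match.group(2)) == 2:
            toy_key = toy_key.replace("layers.2", "proj_out")
        print(toy_key)
    # vision_encoder.patch_embed.projection.weight
    # mask_decoder.output_hypernetworks_mlps.3.proj_out.weight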
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
__a : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
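
# `largest_product` also works on any small n x n grid, e.g.:
if __name__ == "__main__":
    sample = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 1, 2, 3],
        [4, 5, 6, 7],
    ]
    print(largest_product(sample))  # 1680, from the row 5 * 6 * 7 * 8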
"""simple docstring"""
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
if not (isinstance(a__ , a__ ) and isinstance(a__ , a__ )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
lowerCAmelCase :str = len(a__ )
lowerCAmelCase :List[Any] = len(a__ )
lowerCAmelCase :List[Any] = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
lowerCAmelCase :List[Any] = 0
lowerCAmelCase :List[Any] = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
lowerCAmelCase :str = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowerCAmelCase :Any = i
lowerCAmelCase :Optional[int] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod() | 553 |
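
# Example: the longest contiguous run shared by the two strings; ties are
# resolved in favor of the first occurrence because the update is strict.
assert longest_common_substring("abcdef", "zcdemf") == "cde"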
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase )
class __UpperCamelCase ( UpperCamelCase ):
def __init__( self : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> Optional[int]:
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : List[str] , UpperCAmelCase : str=None , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None ) -> Tuple:
lowerCAmelCase :Optional[Any] = {}
lowerCAmelCase :int = {}
if prompt is not None:
lowerCAmelCase :Any = prompt
if generate_kwargs is not None:
lowerCAmelCase :List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCAmelCase :Any = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
lowerCAmelCase :Union[str, Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Tuple , UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase : Any ) -> Optional[int]:
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Any=None ) -> str:
lowerCAmelCase :Dict = load_image(UpperCAmelCase )
if prompt is not None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f"""Received an invalid text input, got - {type(UpperCAmelCase )} - but expected a single string. """
'Note also that one single text can be provided for conditional image to text generation.' )
lowerCAmelCase :List[str] = self.model.config.model_type
if model_type == "git":
lowerCAmelCase :Any = self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
lowerCAmelCase :Union[str, Any] = self.tokenizer(text=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids
lowerCAmelCase :Optional[int] = [self.tokenizer.cls_token_id] + input_ids
lowerCAmelCase :Union[str, Any] = torch.tensor(UpperCAmelCase ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
lowerCAmelCase :Optional[Any] = self.image_processor(images=UpperCAmelCase , header_text=UpperCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCAmelCase :int = self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
lowerCAmelCase :Optional[Any] = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
model_inputs.update(UpperCAmelCase )
else:
raise ValueError(f"""Model type {model_type} does not support conditional text generation""" )
else:
lowerCAmelCase :str = self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCAmelCase :Union[str, Any] = None
return model_inputs
def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any=None ) -> int:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , UpperCAmelCase )
and all(x is None for x in model_inputs['input_ids'] )
):
lowerCAmelCase :Optional[int] = None
if generate_kwargs is None:
lowerCAmelCase :Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCAmelCase :Optional[int] = model_inputs.pop(self.model.main_input_name )
lowerCAmelCase :Any = self.model.generate(UpperCAmelCase , **UpperCAmelCase , **UpperCAmelCase )
return model_outputs
def UpperCAmelCase__ ( self : Dict , UpperCAmelCase : Union[str, Any] ) -> Any:
lowerCAmelCase :str = []
for output_ids in model_outputs:
lowerCAmelCase :Optional[int] = {
'generated_text': self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , )
}
records.append(UpperCAmelCase )
return records | 553 | 1 |
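
# Typical use through the high-level pipeline factory; the model name is one
# common captioning checkpoint and the image path is made up.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("photo.jpg", max_new_tokens=20))  # [{'generated_text': '...'}]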
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
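
# Quick sketch of how a config like this behaves (sizes deliberately tiny and
# made up): derived fields fall back to the base sizes, and `attribute_map`
# routes generic attribute names to model-specific ones.
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=128, num_hidden_layers=2, context_length=256)
    print(config.attention_hidden_size)    # falls back to hidden_size -> 128
    print(config.intermediate_size)        # falls back to 4 * hidden_size -> 512
    print(config.max_position_embeddings)  # routed to context_length -> 256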
"""Simulation of the BB84 quantum key distribution protocol."""
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
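
# The basis-sifting step at the heart of BB84 is purely classical; a tiny
# numpy sketch with made-up bases and bits:
if __name__ == "__main__":
    alice = np.array([0, 1, 1, 0, 1, 0])
    bob = np.array([0, 0, 1, 0, 1, 1])
    measured = "101100"  # Bob's raw bit string (illustrative)
    sifted = "".join(bit for a, b, bit in zip(alice, bob, measured) if a == b)
    print(sifted)  # "1110" — only positions where the bases agree survive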
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: the output is 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
a = logging.getLogger(__name__)
def lowercase (snake_case__ : str , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
if os.path.exists(UpperCAmelCase__ ):
if os.path.exists(os.path.join(UpperCAmelCase__ , """config.json""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase__ , """config.json""" ) ):
os.remove(os.path.join(UpperCAmelCase__ , """config.json""" ) )
if os.path.exists(os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(UpperCAmelCase__ , """pytorch_model.bin""" ) )
else:
os.makedirs(UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
def lowercase (snake_case__ : List[str] , snake_case__ : Union[str, Any]=False ) -> int:
'''simple docstring'''
lowerCAmelCase = 2
if unlogit:
lowerCAmelCase = torch.pow(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = p * torch.log(UpperCAmelCase__ )
lowerCAmelCase = 0
return -plogp.sum(dim=-1 )
def lowercase (snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
logger.info("""lv, h >\t""" + """\t""".join(f'''{x + 1}''' for x in range(len(UpperCAmelCase__ ) ) ) )
for row in range(len(UpperCAmelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + """\t""".join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + """\t""".join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowercase (snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : int=True , snake_case__ : List[str]=True , snake_case__ : int=None , snake_case__ : Optional[int]=False ) -> int:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = model.config.num_hidden_layers, model.config.num_attention_heads
lowerCAmelCase = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
lowerCAmelCase = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
if head_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowerCAmelCase = None
lowerCAmelCase = 0.0
lowerCAmelCase = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
lowerCAmelCase = tuple(t.to(args.device ) for t in inputs )
((lowerCAmelCase ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCAmelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = entropy(attn.detach() , UpperCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCAmelCase = 2
lowerCAmelCase = torch.pow(torch.pow(UpperCAmelCase__ , UpperCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
lowerCAmelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(UpperCAmelCase__ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(UpperCAmelCase__ )
logger.info("""Head ranked by importance scores""" )
lowerCAmelCase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowerCAmelCase = torch.arange(
head_importance.numel() , device=args.device )
lowerCAmelCase = head_ranks.view_as(UpperCAmelCase__ )
print_ad_tensor(UpperCAmelCase__ )
return attn_entropy, head_importance, total_loss
def lowercase (snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ )
lowerCAmelCase = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , UpperCAmelCase__ , original_score * args.masking_threshold )
lowerCAmelCase = torch.ones_like(UpperCAmelCase__ )
lowerCAmelCase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowerCAmelCase = original_score
while current_score >= original_score * args.masking_threshold:
lowerCAmelCase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowerCAmelCase = float("""Inf""" )
lowerCAmelCase = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase__ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
lowerCAmelCase = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
lowerCAmelCase = new_head_mask.view(-1 )
lowerCAmelCase = 0.0
lowerCAmelCase = new_head_mask.view_as(UpperCAmelCase__ )
lowerCAmelCase = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase__ )
# Compute metric and head importance again
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCAmelCase = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , UpperCAmelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(UpperCAmelCase__ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowercase (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
lowerCAmelCase = datetime.now()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCAmelCase = 1 / loss
lowerCAmelCase = datetime.now() - before_time
lowerCAmelCase = sum(p.numel() for p in model.parameters() )
lowerCAmelCase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = [
v,
]
assert sum(len(UpperCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase__ )
lowerCAmelCase = sum(p.numel() for p in model.parameters() )
lowerCAmelCase = datetime.now()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , actually_pruned=UpperCAmelCase__ , )
lowerCAmelCase = 1 / loss
lowerCAmelCase = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , UpperCAmelCase__ , UpperCAmelCase__ , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , UpperCAmelCase__ , UpperCAmelCase__ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(UpperCAmelCase__ , args.output_dir )
def lowercase () -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=UpperCAmelCase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=UpperCAmelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=UpperCAmelCase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=UpperCAmelCase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=UpperCAmelCase__ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=UpperCAmelCase__ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=UpperCAmelCase__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=UpperCAmelCase__ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=UpperCAmelCase__ , default=42 )
parser.add_argument("""--local_rank""" , type=UpperCAmelCase__ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=UpperCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=UpperCAmelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
lowerCAmelCase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
lowerCAmelCase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCAmelCase = torch.device("""cuda""" , args.local_rank )
lowerCAmelCase = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCAmelCase = nn.parallel.DistributedDataParallel(
UpperCAmelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase__ )
elif args.n_gpu > 1:
lowerCAmelCase = nn.DataParallel(UpperCAmelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Prepare dataset
lowerCAmelCase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCAmelCase = (torch.from_numpy(UpperCAmelCase__ ),)
lowerCAmelCase = TensorDataset(*UpperCAmelCase__ )
lowerCAmelCase = RandomSampler(UpperCAmelCase__ )
lowerCAmelCase = DataLoader(UpperCAmelCase__ , sampler=UpperCAmelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCAmelCase = mask_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
prune_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
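    # For context: `mask_heads` above searches for prunable attention heads by zeroing
    # entries of a (num_layers, num_heads) head mask and re-scoring the model. The
    # sketch below illustrates that single step in isolation; the helper name and the
    # 12x12 shape are hypothetical, not part of this script's API.
    def mask_single_head(head_mask: torch.Tensor, layer: int, head: int) -> torch.Tensor:
        """Return a copy of `head_mask` with one attention head disabled (set to 0)."""
        masked = head_mask.clone()
        masked[layer, head] = 0.0
        return masked
    # A GPT-2 base model has 12 layers x 12 heads; an all-ones mask keeps every head.
    print(mask_single_head(torch.ones(12, 12), layer=3, head=7).sum())  # tensor(143.)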
| 715 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 2_7,
'up': 6_5 + ARROW_KEY_FLAG,
'down': 6_6 + ARROW_KEY_FLAG,
'right': 6_7 + ARROW_KEY_FLAG,
'left': 6_8 + ARROW_KEY_FLAG,
'mod_int': 9_1,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 5_0,
'delete': 5_1,
'pg_up': 5_3,
'pg_down': 5_4,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars() -> str:
'''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character() -> str:
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
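# A minimal usage sketch: block for one keypress and classify it. Arrow keys come
# back as chr(code) where the code already includes ARROW_KEY_FLAG; unrecognized
# input is reported as KEYMAP["undefined"] (an int, hence the first check).
if __name__ == "__main__":
    pressed = get_character()
    if pressed == KEYMAP["undefined"]:
        print("unrecognized key")
    elif ord(pressed) in (KEYMAP["up"], KEYMAP["down"], KEYMAP["left"], KEYMAP["right"]):
        print("arrow key pressed")
    else:
        print(f"key: {pressed!r}")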
| 529 | 0 |
from PIL import Image
def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift the brightness of a PIL image: each channel value c becomes c + level."""
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 45 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
lowerCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class SCREAMING_SNAKE_CASE ( snake_case_ ):
__magic_name__ : List[Any] = VOCAB_FILES_NAMES
__magic_name__ : Dict = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=False , lowercase__ : Any=False , lowercase__ : Optional[Any]=False , lowercase__ : Dict=None , lowercase__ : Any=None , lowercase__ : List[str]=None , lowercase__ : int=None , lowercase__ : Optional[Dict[str, Any]] = None , **lowercase__ : List[str] , ):
'''simple docstring'''
a_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
a_ : Optional[Any] = kwargs.get("""name_or_path""" )
if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"""
                """ if you are testing the model, this can safely be ignored""" )
a_ : List[Any] = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
a_ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token
a_ : Dict = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
a_ : Tuple = unk_token if pad_token is None else pad_token
a_ : Any = eos_token if bos_token is None else bos_token
else:
a_ : Optional[int] = """<pad>""" if pad_token is None else pad_token
a_ : str = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase__ , remove_space=lowercase__ , keep_accents=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
a_ : Optional[int] = do_lower_case
a_ : Any = remove_space
a_ : List[str] = keep_accents
a_ : int = vocab_file
a_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
# Used for whitespace normalization in input texts
        # fmt: off
a_ : Dict = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
a_ : Tuple = re.compile(
F"[{''.join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self : str ):
'''simple docstring'''
a_ : Optional[int] = self.__dict__.copy()
a_ : Tuple = None
return state
def __setstate__( self : Tuple , lowercase__ : Optional[int] ):
'''simple docstring'''
a_ : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a_ : Union[str, Any] = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model )
def lowercase_ ( self : Union[str, Any] , lowercase__ : str ):
'''simple docstring'''
a_ : Optional[int] = self.non_printing_characters_re.sub("""""" , lowercase__ )
# Normalize whitespaces
a_ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
a_ : str = unicodedata.normalize("""NFC""" , lowercase__ )
return text
def lowercase_ ( self : Tuple , lowercase__ : str , **lowercase__ : Dict ):
'''simple docstring'''
a_ : Dict = self.preprocess_text(lowercase__ )
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def lowercase_ ( self : Optional[int] , lowercase__ : str ):
'''simple docstring'''
return self.sp_model.PieceToId(lowercase__ )
def lowercase_ ( self : Dict , lowercase__ : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase__ )
@staticmethod
def lowercase_ ( lowercase__ : str ):
'''simple docstring'''
return out_string
def lowercase_ ( self : Union[str, Any] , lowercase__ : List[str] ):
'''simple docstring'''
a_ : Dict = []
a_ : int = """"""
a_ : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
a_ : int = True
a_ : Optional[int] = []
else:
current_sub_tokens.append(lowercase__ )
a_ : List[str] = False
out_string += self.sp_model.decode(lowercase__ )
return out_string
def lowercase_ ( self : Dict ):
'''simple docstring'''
a_ : List[Any] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : Optional[Any] , lowercase__ : str , lowercase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a_ : Any = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , """wb""" ) as fi:
a_ : int = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def lowercase_ ( self : Union[str, Any] , lowercase__ : Union[str, List[str]] , lowercase__ : Union[str, bool] = False ):
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
a_ : List[str] = self.preprocess_text(lowercase__ )
a_ : str = self.sp_model.encode(lowercase__ )
else:
a_ : Optional[Any] = [self.preprocess_text(lowercase__ ) for t in text]
a_ : Any = self.sp_model.encode(lowercase__ )
if return_tensors is True or return_tensors == "pt":
a_ : Tuple = torch.tensor(lowercase__ )
return token_ids
def lowercase_ ( self : List[Any] , lowercase__ : Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(lowercase__ )
def lowercase_ ( self : Dict , lowercase__ : "Conversation" ):
'''simple docstring'''
a_ : Dict = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
a_ : Any = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(lowercase__ ) + F"{self.bos_token}Bot:"
)
return self.encode(text=lowercase__ )
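# Illustrative sketch (standalone, not part of the class API) of the preprocessing
# done by `preprocess_text` above: exotic unicode spaces are collapsed to a plain
# space and the result is NFC-normalized before SentencePiece sees it.
# import unicodedata
# raw = "hello\u00a0world"                      # contains a non-breaking space
# cleaned = "".join(" " if ch == "\u00a0" else ch for ch in raw)
# print(unicodedata.normalize("NFC", cleaned))  # -> "hello world"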
| 442 | 0 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """simple docstring"""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""")
    return encoded
def decode(coded: str) -> str:
    """simple docstring"""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
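    # Round-trip sanity check for the cipher above.
    secret = encode("hello")
    print(secret)          # AABBBAABAAABABAABABAABBAB
    print(decode(secret))  # hello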
| 710 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 199 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=13 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Optional[int]=224 , __UpperCamelCase : Optional[Any]=1000 , __UpperCamelCase : Tuple=[3, 3, 6, 4] , __UpperCamelCase : str=[48, 56, 112, 220] , ) -> Any:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = num_labels
_UpperCamelCase = image_size
_UpperCamelCase = layer_depths
_UpperCamelCase = embed_dims
def _UpperCamelCase ( self : Dict ) -> int:
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : str ) -> str:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCamelCase , layer_scale_init_value=1E-5 , )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ) -> List[Any]:
_UpperCamelCase = SwiftFormerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = SwiftFormerForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_UpperCamelCase = SwiftFormerForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : Union[str, Any] ) -> int:
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = self.prepare_config_and_inputs()
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
snake_case__ = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : Any ) -> str:
_UpperCamelCase = SwiftFormerModelTester(self )
_UpperCamelCase = ConfigTester(
self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _UpperCamelCase ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__UpperCamelCase )
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__UpperCamelCase )
_UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = SwiftFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
pass
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
def check_hidden_states_output(__UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ):
_UpperCamelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = 8
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__UpperCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Tuple ) -> str:
def _config_zero_init(__UpperCamelCase : Any ):
_UpperCamelCase = copy.deepcopy(__UpperCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__UpperCamelCase , __UpperCamelCase , 1E-10 )
if isinstance(getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ):
_UpperCamelCase = _config_zero_init(getattr(__UpperCamelCase , __UpperCamelCase ) )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return configs_no_init
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self : Any ) -> Tuple:
pass
def lowercase ( ) -> int:
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCamelCase ( self : int ) -> List[str]:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : List[str] ) -> List[str]:
_UpperCamelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(__UpperCamelCase )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 420 | """simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
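    # Demonstration with the test tables above: `describe=True` makes `main` print
    # the allocation and claim tables before running the safety simulation.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)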
| 420 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False, ) -> tuple[int, float, str]:
    '''simple docstring'''
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
"""a""": 0.0_8_4_9_7,
"""b""": 0.0_1_4_9_2,
"""c""": 0.0_2_2_0_2,
"""d""": 0.0_4_2_5_3,
"""e""": 0.1_1_1_6_2,
"""f""": 0.0_2_2_2_8,
"""g""": 0.0_2_0_1_5,
"""h""": 0.0_6_0_9_4,
"""i""": 0.0_7_5_4_6,
"""j""": 0.0_0_1_5_3,
"""k""": 0.0_1_2_9_2,
"""l""": 0.0_4_0_2_5,
"""m""": 0.0_2_4_0_6,
"""n""": 0.0_6_7_4_9,
"""o""": 0.0_7_5_0_7,
"""p""": 0.0_1_9_2_9,
"""q""": 0.0_0_0_9_5,
"""r""": 0.0_7_5_8_7,
"""s""": 0.0_6_3_2_7,
"""t""": 0.0_9_3_5_6,
"""u""": 0.0_2_7_5_8,
"""v""": 0.0_0_9_7_8,
"""w""": 0.0_2_5_6_0,
"""x""": 0.0_0_1_5_0,
"""y""": 0.0_1_9_9_4,
"""z""": 0.0_0_0_7_7,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
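if __name__ == "__main__":
    # Quick demonstration: "khoor zruog" is "hello world" Caesar-shifted by 3.
    # The decoder tries every shift and returns (shift, chi_squared_value, plaintext);
    # on very short inputs the frequency test can occasionally prefer a wrong shift.
    print(decrypt_caesar_with_chi_squared("khoor zruog"))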
| 322 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _snake_case ( _lowercase ):
def __init__( self: str , **__lowerCamelCase: str ) -> List[Any]:
super().__init__(**__lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self: Any , __lowerCamelCase: Union[str, List[str], "Image", List["Image"]] , **__lowerCamelCase: Tuple ) -> List[str]:
return super().__call__(__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = {}
if "candidate_labels" in kwargs:
__UpperCAmelCase : Any = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
__UpperCAmelCase : str = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: Dict=None , __lowerCamelCase: Optional[int]="This is a photo of {}." ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = load_image(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.image_processor(images=[image] , return_tensors=self.framework )
__UpperCAmelCase : Dict = candidate_labels
__UpperCAmelCase : Any = [hypothesis_template.format(__lowerCamelCase ) for x in candidate_labels]
__UpperCAmelCase : Any = self.tokenizer(__lowerCamelCase , return_tensors=self.framework , padding=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict ) -> Tuple:
__UpperCAmelCase : str = model_inputs.pop("candidate_labels" )
__UpperCAmelCase : Dict = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __lowerCamelCase ):
__UpperCAmelCase : Dict = text_inputs[0]
else:
# Batching case.
__UpperCAmelCase : Optional[int] = text_inputs[0][0]
__UpperCAmelCase : Optional[int] = self.model(**__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self: Any , __lowerCamelCase: int ) -> Optional[Any]:
__UpperCAmelCase : Tuple = model_outputs.pop("candidate_labels" )
__UpperCAmelCase : Dict = model_outputs["logits"][0]
if self.framework == "pt":
__UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
__UpperCAmelCase : Optional[Any] = probs.tolist()
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Optional[Any] = [scores]
elif self.framework == "tf":
__UpperCAmelCase : str = stable_softmax(__lowerCamelCase , axis=-1 )
__UpperCAmelCase : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
__UpperCAmelCase : List[Any] = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda x : -x[0] )
]
return result
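# Hedged usage sketch for the pipeline defined above (model name illustrative):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")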
| 382 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
_snake_case = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Union[str, Any] = torch.load(snake_case__, map_location="cpu" )
return sd
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=rename_keys_prefix ) -> List[Any]:
__UpperCAmelCase : Optional[int] = OrderedDict()
__UpperCAmelCase : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__UpperCAmelCase : Optional[int] = key
for name_pair in rename_keys_prefix:
__UpperCAmelCase : List[Any] = new_key.replace(name_pair[0], name_pair[1] )
__UpperCAmelCase : Optional[Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
__UpperCAmelCase : Optional[Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[Any]:
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__UpperCAmelCase : int = "pretraining"
if "vcr" in checkpoint_path:
__UpperCAmelCase : Optional[Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : int = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__UpperCAmelCase : Tuple = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : Optional[Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__UpperCAmelCase : Optional[int] = {"visual_embedding_dim": 512}
__UpperCAmelCase : List[str] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : Optional[int] = {"visual_embedding_dim": 2048}
__UpperCAmelCase : str = "vqa_advanced"
elif "vqa" in checkpoint_path:
__UpperCAmelCase : str = {"visual_embedding_dim": 2048, "num_labels": 3129}
__UpperCAmelCase : Union[str, Any] = "vqa"
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : str = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__UpperCAmelCase : Optional[int] = "nlvr"
__UpperCAmelCase : Optional[int] = VisualBertConfig(**snake_case__ )
# Load State Dict
__UpperCAmelCase : str = load_state_dict(snake_case__ )
__UpperCAmelCase : int = get_new_dict(snake_case__, snake_case__ )
if model_type == "pretraining":
__UpperCAmelCase : Union[str, Any] = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
__UpperCAmelCase : Union[str, Any] = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
__UpperCAmelCase : str = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
__UpperCAmelCase : int = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
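# Typical invocation (the script file name follows the transformers repo; the output
# directory is illustrative, and the checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./converted_visual_bert_vqa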
| 382 | 1 |
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowercase__ ( lowerCAmelCase : List[str]=None ) -> Any:
"""simple docstring"""
if subparsers is not None:
UpperCAmelCase = subparsers.add_parser('test' )
else:
UpperCAmelCase = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=lowercase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase_ )
return parser
def lowercase__ ( lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
UpperCAmelCase = script_name
else:
UpperCAmelCase = F"--config_file={args.config_file} {script_name}"
UpperCAmelCase = ["accelerate-launch"] + test_args.split()
UpperCAmelCase = execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = test_command_parser()
UpperCAmelCase = parser.parse_args()
test_command(lowercase_ )
if __name__ == "__main__":
main()
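# This module backs the `accelerate test` subcommand; a typical CLI flow is:
#   accelerate test --config_file=/path/to/default_config.yaml
# which expands to `accelerate-launch .../test_utils/scripts/test_script.py`
# via execute_subprocess_async above.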
| 707 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 183 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Union[str, Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 567 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
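    # Sanity check for the incremental (dictionary-based) sieve above: each composite
    # stores one prime factor and is re-queued lazily, so memory stays small.
    primes = sieve()
    print([next(primes) for _ in range(5)])  # [2, 3, 5, 7, 11]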
| 567 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( __lowerCamelCase , unittest.TestCase ):
a_ = KandinskyVaaInpaintPipeline
a_ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
a_ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a_ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a_ = False
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return 3_2
@property
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return 3_2
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return 1_0_0
@property
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = self.dummy_unet
UpperCAmelCase_ = self.dummy_movq
UpperCAmelCase_ = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_UpperCamelCase , )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCamelCase )
# create init_image
UpperCAmelCase_ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase_ = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase_ = 0
if str(_UpperCamelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "cpu"
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_UpperCamelCase )
UpperCAmelCase_ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase_ = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((7_6_8, 7_6_8), dtype=np.float32)
        mask[:2_5_0, 2_5_0:-2_5_0] = 0

        prompt = "a hat"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=1_0_0,
            height=7_6_8,
            width=7_6_8,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)

        assert_mean_pixel_difference(image, expected_image)
| 660 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
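# Usage sketch once installed as a console script (the entry-point name is assumed
# from the `diffusers-cli` usage string above):
#   diffusers-cli env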
| 151 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of records into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize every column; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
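# Usage sketch (made-up data; weight 0 = "lower is better", weight 1 = "higher is better"):
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   print(procentual_proximity(vehicles, [0, 0, 1]))
# Each row gains one trailing combined score in [0, number_of_columns].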
| 151 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 2_5),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 151 | 1 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 64 |
from math import factorial
def solution(num: int = 1_0_0) -> int:
    """Sum the digits of num!."""
    return sum(int(x) for x in str(factorial(num)))
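# Worked check: factorial(10) == 3628800 and 3+6+2+8+8+0+0 == 27, so solution(10) == 27.
# Project Euler problem 20 asks for solution(100).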
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 312 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
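# Quick worked examples (0b1101 == 13):
#   set_bit(0b1101, 1)    -> 15 (0b1111)
#   clear_bit(0b1101, 2)  -> 9  (0b1001)
#   flip_bit(0b1101, 0)   -> 12 (0b1100)
#   is_bit_set(0b1101, 3) -> True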
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
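# Typical invocations (the utils/ script location in the repo is assumed):
#   python utils/check_doc_toc.py                      # fail if the toctree is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree in place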
| 309 | 0 |
def binary_count_setbits(a: int) -> int:
    """Count the set bits (1s) in the binary representation of `a`."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
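# e.g. binary_count_setbits(25) == 3, since 25 == 0b11001.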
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 25 |
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=2_1_1_2_8,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        max_relative_position=6_4,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
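# Minimal usage sketch (all values above are the defaults):
#   config = NezhaConfig(hidden_size=7_6_8, num_hidden_layers=1_2)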
| 607 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 713 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1E-0_5, "token": 38_015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1E-0_5, "token": 25_506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1E-0_5,
                    "token": 38_015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1E-0_5,
                    "token": 25_506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2E-0_5, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2E-0_5, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9E-0_5, "token": 2_941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2E-0_5, "token": 35_676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2E-0_5, "token": 16_416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2E-0_5,
                    "token": 35_676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2E-0_5, "token": 16_416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1E-0_5, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2E-0_5, "token": 2_941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2E-0_5, "token": 13_606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2E-0_5,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2E-0_5, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2E-0_5,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2E-0_5, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1_573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2_201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12_790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2_941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
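# Standalone usage sketch of the pipeline under test (tiny test checkpoint):
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
#   unmasker(f"Paris is the {unmasker.tokenizer.mask_token} of France.", top_k=2)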
| 109 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
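# Note: with the mapping above, importing this package stays cheap; e.g. a line like
# `from transformers import RobertaModel` only pulls in the torch-backed submodule
# on first attribute access (this deferral is what _LazyModule implements).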
| 629 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
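# bb84(8, seed=0) is deterministic for a given qiskit build and returns an
# 8-character string of 0s and 1s (requires the Aer simulator backend).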
if __name__ == "__main__":
    print(f"""The generated key is : {bb84(8, seed=0)}""")
from doctest import testmod
testmod()
| 629 | 1 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )
# Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (flax parameter-tree keys below follow FlaxT5/FlaxLongT5 naming)
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
# Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (flax parameter-tree keys follow FlaxT5 naming)
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
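# Example invocation (all paths/names are placeholders):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path /tmp/flax_dump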
| 643 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 643 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 665 |
"""Topological sort of a directed acyclic graph using Kahn's algorithm
(repeatedly remove vertices of indegree zero)."""


def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
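# Usage sketch for the cycle branch (added for illustration; the graph below is
# a made-up value): every vertex sits on the cycle 0 -> 1 -> 2 -> 0, so no
# vertex ever reaches indegree 0 and the function reports the cycle.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"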
| 665 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
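# Usage sketch (added; the component values are arbitrary illustration values):
# L = 10 mH and C = 5 uF give f = 1 / (2 * pi * sqrt(L * C)) ~= 711.76 Hz.
label, frequency = resonant_frequency(inductance=10e-3, capacitance=5e-6)
print(label, round(frequency, 2))  # Resonant frequency 711.76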
| 705 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowercase = {'''mobilebert-uncased''': 5_1_2}
lowercase = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 103 | 0 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
return number
if __name__ == "__main__":
print(F"""{solution() = }""")
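# Sanity checks (added for illustration). The 6th prime is 13; the full
# solution() - the 10001st prime - is 104743 (Project Euler problem 7).
assert solution(6) == 13
# assert solution() == 104743  # uncomment if a few extra seconds of compute is fine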
| 101 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value entries
            # under the renamed HF layout.
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            prefix = f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
args = parser.parse_args()
convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 593 | 0 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
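# Usage sketch (added; 8051 = 83 * 97 is the classic textbook input for
# Pollard's rho). Success is probabilistic, so `None` simply means every
# attempt failed for this seed/step combination.
factor = pollard_rho(8051)
assert factor in (83, 97, None)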
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 720 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
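# Worked example (added for illustration): from [1, 2, 3, 4, 5] the best
# non-adjacent picks are 1 + 3 + 5 = 9.
assert maximum_non_adjacent_sum([1, 2, 3, 4, 5]) == 9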
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 383 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedfileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    """Read contents of XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    """Read contents of ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
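# Hedged usage sketch (added; not part of the original module). "example.txt.gz"
# is a hypothetical path used only for illustration: the filesystem exposes a
# single file whose name drops the compression extension.
#
# fs = GzipFileSystem("example.txt.gz")
# print(fs.ls("/"))             # one entry named "example.txt"
# print(fs.cat("example.txt"))  # decompressed bytes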
| 266 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
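# Non-interactive usage sketch (added; the edges are made-up values):
# 0 -> 1 with weight 4 and 1 -> 2 with weight -2.
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 1, "dst": 2, "weight": -2},
]
assert bellman_ford(example_graph, vertex_count=3, edge_count=2, src=0) == [0.0, 4.0, 2.0]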
| 266 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
@require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 708 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ), )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 582 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


matrix = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_90(matrix))

matrix = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_180(matrix))

matrix = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_270(matrix))
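# Self-check sketch (added; not in the original): four 90-degree rotations are
# the identity. The helpers mutate in place, so compare against a fresh copy.
m = make_matrix()
expected = [row[:] for row in m]
for _ in range(4):
    m = rotate_90(m)
assert m == expected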
| 590 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ", )
    return int(stack[0])
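# Worked example (added for illustration): "5 6 9 * +" evaluates to
# 5 + (6 * 9) = 59.
assert solve("5 6 9 * +".split(" ")) == 59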
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 576 | 0 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
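# Usage sketch (added; the concentrations are arbitrary illustration values).
# With electron_conc = 25 and intrinsic_conc = 10, mass action gives
# hole_conc = 10**2 / 25 = 4.
assert carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10) == ("hole_conc", 4.0)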
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
import json
import sys
def format_json_to_md(results_json, output_md_file):
    with open(results_json, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
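# Shape of the expected input, sketched for illustration (assumed from the code
# above, not taken from the original script): each benchmark maps metric names
# to dicts with a required "new" value and optional "old"/"diff" values.
#
# {
#     "benchmarks/cache.json": {
#         "load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}
#     }
# }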
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 78 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
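# Self-check sketch (added; not in the original file): `custom_unfold` is meant
# to mirror `torch.Tensor.unfold`, so the two should agree on a small tensor.
# Kept commented out so this config module stays cheap to import.
#
# import torch
# t = torch.arange(12).reshape(3, 4)
# assert torch.equal(custom_unfold(t, dimension=1, size=2, step=1), t.unfold(1, 2, 1))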
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 515 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
| 495 | 0 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
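
# A hedged alternative sketch of the same computation with math.prod over sliding
# windows (assumes Python 3.8+; illustrative only, not part of the original solution):
from math import prod

def solution_prod(n: str = N) -> int:
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))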
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs) -> MvpTokenizer:
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> MvpTokenizerFast:
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
@require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
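
# Hedged usage sketch (network access and the "RUCAIBox/mvp" checkpoint are assumed):
#   tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
#   batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")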
| 341 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
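    # Hedged usage sketch of the lazy structure above (illustrative only):
    #   from transformers.models.blip import BlipProcessor  # resolves processing_blip on first access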
| 62 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        do_convert_rgb=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
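
# Hedged usage sketch (assumes Pillow is installed and "demo.png" is a placeholder path):
#   processor = BlipImageProcessor()
#   batch = processor(images=PIL.Image.open("demo.png"), return_tensors="np")
#   batch["pixel_values"].shape  # expected (1, 3, 384, 384) with the defaults above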
| 112 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer.model_max_length = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
@require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt")
        self.assertEqual(32, targets["input_ids"].shape[1])
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ : Any = tempfile.mkdtemp()
a_ : Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running'''
a_ : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
a_ : int = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
a_ : Any = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
a_ : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ : List[str] = tempfile.mkdtemp()
a_ : str = ''' He is very happy, UNwant\u00E9d,running'''
a_ : Tuple = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
a_ : Optional[int] = chr(0xE007 )
additional_special_tokens.append(_UpperCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a_ : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
a_ : Optional[int] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
a_ : List[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn(_UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a_ : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ : Union[str, Any] = self.get_clean_sequence(_UpperCAmelCase )
# a special token for Canine can be defined as follows:
a_ : Union[str, Any] = 0xE005
a_ : Any = chr(_UpperCAmelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
a_ : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
a_ : int = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_UpperCAmelCase )
a_ : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
a_ : str = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
a_ : Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , input_encoded + special_token_id )
a_ : Dict = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ : Optional[Any] = chr(0xE005 )
a_ : Tuple = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_UpperCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
a_ : Dict = tokenizer.tokenize(_UpperCAmelCase )
a_ : Any = tokenizer.tokenize(_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _UpperCAmelCase )
self.assertEqual(token_a[0] , _UpperCAmelCase )
@require_tokenizers
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
a_ : Optional[Any] = 0xE006
a_ : List[str] = chr(_UpperCAmelCase )
a_ : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_UpperCAmelCase )
tokenizer.from_pretrained(_UpperCAmelCase )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a_ : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a_ : Tuple = json.load(_UpperCAmelCase )
# a special token for Canine can be defined as follows:
a_ : str = 0xE006
a_ : int = chr(_UpperCAmelCase )
a_ : Optional[int] = [new_token_a]
a_ : Tuple = [new_token_a]
with open(os.path.join(_UpperCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a_ : Union[str, Any] = tokenizer_class.from_pretrained(_UpperCAmelCase , extra_ids=0 )
self.assertIn(_UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a_ : Any = 0xE007
a_ : Optional[int] = chr(_UpperCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a_ : Dict = [AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase )]
a_ : List[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , extra_ids=0 )
self.assertIn(_UpperCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ : List[str] = '''hello world'''
if self.space_between_special_tokens:
a_ : Any = '''[CLS] hello world [SEP]'''
else:
a_ : Optional[int] = input
a_ : List[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
a_ : Optional[int] = tokenizer.decode(_UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_UpperCAmelCase , [output, output.lower()] )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ : Optional[int] = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
a_ : List[str] = '''a'''
a_ : Optional[Any] = ord(_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase , attr + """_id""" , _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase , attr + """_id""" ) , _UpperCAmelCase )
setattr(_UpperCAmelCase , attr + """_id""" , _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase , attr + """_id""" ) , _UpperCAmelCase )
setattr(_UpperCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_UpperCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_UpperCAmelCase , """additional_special_tokens_ids""" ) , [] )
a_ : Optional[Any] = 0xE006
a_ : List[Any] = chr(_UpperCAmelCase )
setattr(_UpperCAmelCase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_UpperCAmelCase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_UpperCAmelCase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
def _lowerCAmelCase ( self ):
'''simple docstring'''
pass
| 713 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
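
# Hedged usage sketch (the dataset name is a placeholder; assumes the `datasets`
# library with audio support is installed):
#   from datasets import load_dataset
#   ds = load_dataset("my-org/my-asr-dataset", split="train")  # hypothetical dataset
#   template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="transcription")
#   template = template.align_with_features(ds.features)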
| 460 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
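
# Hedged wiring sketch (tokenizer, file path and sequence length are placeholders):
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=80)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)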
| 340 |
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        # dictionary mapping each vertex to the list of its neighbours
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
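
# A hedged alternative sketch: the same traversal with an explicit stack instead of
# recursion (illustrative only; assumes integer vertices 0..n-1 as in the class above).
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            vertex = stack.pop()
            if visited[vertex]:
                continue
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.vertex.get(vertex, [])))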
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 516 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Optional[Any]:
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 556 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 556 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33 |
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,lowercase ,lowercase ):
_UpperCAmelCase = False
return [i for i in range(2 ,lowercase ) if is_prime[i]]
def __UpperCAmelCase ( lowercase = 10**8 ):
"""simple docstring"""
_UpperCAmelCase = calculate_prime_numbers(max_number // 2 )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = len(lowercase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
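
# Quick sanity check on a small bound (the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, i.e. ten numbers):
#   assert solution(30) == 10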
if __name__ == "__main__":
print(F'''{solution() = }''')
| 277 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowerCamelCase__ ( __A ):
def __init__( self ,A ,A=768 ):
super().__init__(UpperCamelCase__ )
UpperCAmelCase = proj_size
UpperCAmelCase = CLIPVisionModel(UpperCamelCase__ )
UpperCAmelCase = PaintByExampleMapper(UpperCamelCase__ )
UpperCAmelCase = nn.LayerNorm(config.hidden_size )
UpperCAmelCase = nn.Linear(config.hidden_size ,self.proj_size )
# uncondition for scaling
UpperCAmelCase = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _UpperCamelCase ( self ,A ,A=False ):
UpperCAmelCase = self.model(pixel_values=UpperCamelCase__ )
UpperCAmelCase = clip_output.pooler_output
UpperCAmelCase = self.mapper(latent_states[:, None] )
UpperCAmelCase = self.final_layer_norm(UpperCamelCase__ )
UpperCAmelCase = self.proj_out(UpperCamelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCamelCase__ ( nn.Module ):
def __init__( self ,A ):
super().__init__()
UpperCAmelCase = (config.num_hidden_layers + 1) // 5
UpperCAmelCase = config.hidden_size
UpperCAmelCase = 1
UpperCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,activation_fn="""gelu""" ,attention_bias=UpperCamelCase__ )
for _ in range(UpperCamelCase__ )
] )
def _UpperCamelCase ( self ,A ):
for block in self.blocks:
UpperCAmelCase = block(UpperCamelCase__ )
return hidden_states
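# Hedged shape sketch (made-up sizes; a plain Linear stands in for the transformer
# blocks above): a pooled image embedding becomes a length-1 conditioning sequence.
_pooled = torch.randn(2, 32)  # stand-in for CLIP pooler_output
_toy_mapper = nn.Sequential(nn.LayerNorm(32), nn.Linear(32, 16))
assert _toy_mapper(_pooled[:, None]).shape == (2, 1, 16)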
| 708 |
"""simple docstring"""
from math import sqrt
def solution(limit=1_000_000):
    """
    Return the least integer M such that the number of cuboids up to M x M x M
    with an integer shortest surface path first exceeds `limit` (Project Euler 86).
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
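# Hedged brute-force check (added for illustration) of the closed-form pair count
# used above: for longest side m and shorter-side sum s, it counts pairs (a, b)
# with 1 <= a <= b <= m and a + b == s.
def _count_pairs_brute(m, s):
    return sum(1 for a in range(1, m + 1) for b in range(a, m + 1) if a + b == s)


for _m in range(1, 12):
    for _s in range(2, 2 * _m + 1):
        assert _count_pairs_brute(_m, _s) == max(0, min(_m, _s // 2) - max(1, _s - _m) + 1)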
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 419 |
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE : Union[str, Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
SCREAMING_SNAKE_CASE : List[Any] = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
SCREAMING_SNAKE_CASE : Any = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A ( self ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        """Compute SQuAD exact-match and F1 scores."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
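# Hedged illustration (added; not the official script, which also normalizes text):
# a bare-bones exact-match and token-F1 for one prediction/reference pair.
def _toy_em_and_f1(prediction, reference):
    em = float(prediction == reference)
    pred_tokens, ref_tokens = prediction.split(), reference.split()
    common = sum(min(pred_tokens.count(t), ref_tokens.count(t)) for t in set(pred_tokens))
    if common == 0:
        return em, 0.0
    precision = common / len(pred_tokens)
    recall = common / len(ref_tokens)
    return em, 2 * precision * recall / (precision + recall)


assert _toy_em_and_f1("in 1976", "1976") == (0.0, 2 * (1 / 2) * 1 / (1 / 2 + 1))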
| 419 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__UpperCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__UpperCAmelCase = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
__UpperCAmelCase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
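# Hedged mini-demo (made-up docstring line, added for illustration) of the
# _re_checkpoint pattern used above.
_demo_line = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _re_checkpoint.findall(_demo_line) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]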
| 703 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic (sigmoid) activation."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy cost."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the labels under the current weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit weights by batch gradient descent on the logistic loss."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
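    # Hedged toy-data sanity check (points invented here, not from the original
    # file): after fitting, the mean class-1 probability should exceed class-0's.
    toy_x = np.array([[0.0, 0.0], [0.0, 1.0], [4.0, 4.0], [4.0, 5.0]])
    toy_y = np.array([0, 0, 1, 1])
    toy_theta = logistic_reg(0.1, toy_x, toy_y, max_iterations=1_000)
    toy_probs = sigmoid_function(np.dot(toy_x, toy_theta))
    assert toy_probs[2:].mean() > toy_probs[:2].mean()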
| 582 | 0 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return it;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger on first use."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like logger.warning(), but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like logger.warning(), but emits each distinct warning only once per process."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars globally."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars globally."""
    global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars() | 174 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # build a clean sequence of individually decodable ids (single bytes are not always valid utf-8)
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.perceiver_tokenizer
lowerCamelCase__ : Any ='Unicode €.'
lowerCamelCase__ : str =tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowerCamelCase_ )
# decoding
lowerCamelCase__ : Dict =tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[Any] =tokenizer('e è é ê ë' )
lowerCamelCase__ : str =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowerCamelCase_ )
# decoding
lowerCamelCase__ : List[str] =tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
    def test_prepare_batch_integration(self):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.perceiver_tokenizer
lowerCamelCase__ : Optional[int] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : Dict =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCamelCase__ : Union[str, Any] =tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : int =list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : Dict =list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
    def test_empty_target_text(self):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.perceiver_tokenizer
lowerCamelCase__ : Optional[Any] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : Any =tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowerCamelCase_ )
self.assertIn('attention_mask' , lowerCamelCase_ )
self.assertNotIn('decoder_input_ids' , lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCamelCase_ )
    def test_max_length_integration(self):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =self.perceiver_tokenizer
lowerCamelCase__ : Dict =[
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : Dict =tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding='max_length' , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
    def test_save_and_load_tokenizer(self):
"""simple docstring"""
lowerCamelCase__ : Dict =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : str =tempfile.mkdtemp()
lowerCamelCase__ : Dict =' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : List[Any] =tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : List[Any] =tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Any =after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : Dict =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Union[str, Any] =tempfile.mkdtemp()
lowerCamelCase__ : Tuple =' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : Dict =tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : str =tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str =tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple =after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Optional[int] =tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
"""simple docstring"""
lowerCamelCase__ : str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCamelCase__ : int =json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCamelCase__ : Any =json.load(lowerCamelCase_ )
lowerCamelCase__ : Tuple =[f"""<extra_id_{i}>""" for i in range(125 )]
lowerCamelCase__ : Tuple =added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : Dict =added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Tuple =tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Dict =added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowerCamelCase_ )]
lowerCamelCase__ : int =tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    def test_pretrained_model_lists(self):
"""simple docstring"""
pass
    def test_pretokenized_inputs(self):
"""simple docstring"""
pass
    def test_conversion_reversible(self):
"""simple docstring"""
pass
    def test_get_vocab(self):
"""simple docstring"""
pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str) | 174 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 702 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
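# Hedged smoke check (added; runs only when executed directly and assumes
# transformers is installed, since PretrainedConfig.__init__ is exercised).
if __name__ == "__main__":
    _cfg = BeitConfig(image_size=384)
    assert _cfg.to_dict()["image_size"] == 384 and _cfg.model_type == "beit"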
| 335 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
    def test_save_load_local(self):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
    def test_save_load_optional_components(self):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical(self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
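# Hedged standalone restatement (added for illustration) of the seeding idiom in
# get_dummy_inputs above, so the device-dependent generator logic can be checked
# without building a pipeline.
def _make_seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    if device.startswith("mps"):
        return torch.manual_seed(seed)  # mps shares the globally seeded generator
    return torch.Generator(device=device).manual_seed(seed)


assert torch.randn(2, generator=_make_seeded_generator("cpu")).shape == (2,)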
| 173 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 173 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase_response = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase_response, -2 * pi))
    plt.show()
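# Hedged end-to-end example (added for illustration): a 5-tap moving-average
# filter satisfies FilterType structurally, so it can be fed to either helper.
class MovingAverage:
    def __init__(self, taps: int = 5) -> None:
        self.history = [0.0] * taps

    def process(self, sample: float) -> float:
        self.history = self.history[1:] + [sample]
        return sum(self.history) / len(self.history)


if __name__ == "__main__":
    show_frequency_response(MovingAverage(), 48_000)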
plt.show()
| 714 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
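# Hedged round-trip check (toy data, added for illustration) of the
# "<title>\t<count>" format parsed by load_entity_vocab above; indices come from
# line order and counts are ignored.
import io


def _load_entity_vocab_from_text(text):
    vocab = {}
    for index, line in enumerate(io.StringIO(text)):
        title, _count = line.rstrip().split("\t")
        vocab[title] = index
    return vocab


assert _load_entity_vocab_from_text("[MASK]\t100\n[UNK]\t50\n") == {"[MASK]": 0, "[UNK]": 1}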
| 431 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : Optional[int] ):
return list(itertools.chain(*__UpperCAmelCase ) )
if equal_length:
lowercase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowercase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase_ = [np.asarray(__UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : str=False ):
'''simple docstring'''
if equal_length:
lowercase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase_ = [np.asarray(__UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self.assertTrue(input_values[0][1_000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1_400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2_000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 412 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spanish_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a mod-23 checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
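# Illustrative checks (letters follow the standard mod-23 DNI lookup table above):
# is_spanish_national_id("12345678Z")   -> True   (12345678 % 23 == 14 -> "Z")
# is_spanish_national_id("12345678B")   -> False
# is_spanish_national_id("12345678-Z")  -> True, dashes are stripped first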
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
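# Note: mapping keys containing ".*." are wildcard patterns; recursively_load_weights()
# below substitutes the fairseq layer index for "*" before copying each layer's weights.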
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2T
        ignore_keys = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        mapping = MAPPING_T2S
        ignore_keys = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        mapping = MAPPING_S2S
        ignore_keys = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, ignore_keys):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in mapping.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
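# Example invocation (illustrative only; the checkpoint path is a placeholder):
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task s2t --checkpoint_path speecht5_asr.pt --pytorch_dump_folder_path out/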
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 707 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class A__ ( BaseImageProcessor ):
    # Mirrors the standard shortest-edge-resize / center-crop / rescale / normalize
    # pipeline used by several transformers image processors.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
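# Illustrative usage (hedged sketch; any RGB PIL image works):
#   processor = A__()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the defaults above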
| 503 | 0 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 420 | """simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
UpperCAmelCase = 6_37_81_37.0
UpperCAmelCase = 6_35_67_52.31_42_45
UpperCAmelCase = 6_378_137
def lowercase ( a__ : float , a__ : float , a__ : float , a__ : float ) -> float:
_UpperCamelCase = (AXIS_A - AXIS_B) / AXIS_A
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = atan((1 - flattening) * tan(radians(a__ ) ) )
_UpperCamelCase = radians(a__ )
_UpperCamelCase = radians(a__ )
# Equation
_UpperCamelCase = sin((phi_a - phi_a) / 2 )
_UpperCamelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_UpperCamelCase = sqrt(sin_sq_phi + (cos(a__ ) * cos(a__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(a__ )
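# Illustrative call (coordinates and result are approximate):
#   haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)
#   -> roughly 254_000 metres between San Francisco and Yosemite.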
if __name__ == "__main__":
import doctest
doctest.testmod()
| 420 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
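# Sanity check (well-known Project Euler #7 result): solution() == 104_743,
# i.e. the 10_001st prime number.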
if __name__ == "__main__":
print(f'''{solution() = }''')
| 712 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 643 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled "memorry" kwarg is accepted for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
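# Illustrative usage (hedged; "linear"/"dynamic" with a float factor > 1 are the
# only settings the validator above accepts):
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})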
| 56 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None, ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from the current position to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
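# Note: because f_cost is the heuristic alone (g_cost never enters the ordering),
# the search below is greedy best-first rather than A*, so the returned path is
# not guaranteed to be the shortest one.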
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the reachable neighbours (in-bounds, obstacle-free) of a node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
greedy_bf = GreedyBestFirst(init, goal)
path = greedy_bf.search()
if path:
    for pos_x, pos_y in path:
        grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 350 | 0 |
"""simple docstring"""
from manim import *
class __a ( lowerCAmelCase__ ):
def snake_case_ ( self ):
_lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('CPU' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCamelCase = [mem.copy() for i in range(4 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('GPU' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('Model' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCamelCase = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
cpu_targs.append(a__ )
_lowerCamelCase = [mem.copy() for i in range(6 )]
_lowerCamelCase = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCamelCase = Text('Loaded Checkpoint' , font_size=24 )
_lowerCamelCase = Group(a__ , a__ ).arrange(a__ , aligned_edge=a__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCamelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCamelCase = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCamelCase = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) , Write(a__ ) )
self.play(Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCamelCase = []
_lowerCamelCase = []
for i, rect in enumerate(a__ ):
_lowerCamelCase = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
first_animations.append(GrowFromCenter(a__ , run_time=1 ) )
_lowerCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
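# To render the scene above with the manim CLI (the file name below is an
# assumption, not part of the original source):
#   manim -pql stage_2.py Stage2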
| 716 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def SCREAMING_SNAKE_CASE_ ( )-> Generator[int, None, None]:
_lowerCamelCase = {}
_lowerCamelCase = 2
while True:
_lowerCamelCase = factor_map.pop(snake_case , snake_case )
if factor:
_lowerCamelCase = factor + prime
while x in factor_map:
x += factor
_lowerCamelCase = factor
else:
_lowerCamelCase = prime
yield prime
prime += 1
def SCREAMING_SNAKE_CASE_ ( snake_case : float = 1e10 )-> int:
_lowerCamelCase = sieve()
_lowerCamelCase = 1
while True:
_lowerCamelCase = next(snake_case )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(snake_case )
n += 2
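# Hand-traced sanity check on this implementation (small, assumed limit):
# solution(100) == 5, because 2 * p_5 * 5 = 2 * 11 * 5 = 110 is the first
# product over the scanned odd n's to exceed 100.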
if __name__ == "__main__":
print(solution())
| 222 | 0 |
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""

import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"  # string kept as-is from the source; note the missing '<'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
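# Example invocation (the script file name and output directory are assumed;
# the flags come from the argparse definition above):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./converted-donut-base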
| 634 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
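# Minimal usage sketch (values assumed): with 6 encoder layers and 3 sparse
# encoder layers, every 2nd encoder block becomes a sparse (MoE) layer.
# config = SwitchTransformersConfig(num_layers=6, num_sparse_encoder_layers=3)
# print(config.encoder_sparse_step)  # -> 2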
| 634 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle (.pkl) checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
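# Example invocation (file names are placeholders; the flags come from the
# argparse definition above):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin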
| 232 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
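# Quick demo (graph assumed): this 5-vertex graph contains the Hamiltonian
# cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0, which the backtracking search finds.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]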
| 232 | 1 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random 3-channel image as a PIL.Image, channels moved to the last axis.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 430 |
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 430 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 718 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        # Returns the in-bounds, obstacle-free neighbours of `parent`.
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        # Walk parent pointers back to the start and reverse into path order.
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 25 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # Checks whether `cp` is the codepoint of a CJK character
    # (CJK Unified Ideographs, their extensions, and compatibility blocks).
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # Returns 1 if every character in `word` is a CJK character, else 0.
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file that needs processing, same as the LM training data",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save the result")

    args = parser.parse_args()
    main(args)
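# Example invocation (script file name assumed; paths reuse the argparse
# defaults above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt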
| 257 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _lowercase ( __UpperCAmelCase ):
_lowerCamelCase = DistilBertTokenizer
_lowerCamelCase = DistilBertTokenizerFast
_lowerCamelCase = True
@slow
def lowerCAmelCase__ ( self ):
__magic_name__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__magic_name__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
__magic_name__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 490 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A = logging.get_logger(__name__)
A = {"""vocab_file""": """sentencepiece.model"""}
A = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
A = {
"""google/rembert""": 256,
}
class lowerCamelCase__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : str=True , UpperCamelCase_ : int="[CLS]" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : str="[UNK]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Union[str, Any]="[PAD]" , UpperCamelCase_ : List[Any]="[CLS]" , UpperCamelCase_ : Union[str, Any]="[MASK]" , **UpperCamelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Any = do_lower_case
__UpperCAmelCase : str = remove_space
__UpperCAmelCase : str = keep_accents
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor()
self.sp_model.Load(UpperCamelCase_)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return len(self.sp_model)
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : str = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any):
"""simple docstring"""
__UpperCAmelCase : str = self.__dict__.copy()
__UpperCAmelCase : Tuple = None
return state
def __setstate__( self : Any , UpperCamelCase_ : Dict):
"""simple docstring"""
__UpperCAmelCase : List[str] = d
__UpperCAmelCase : int = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def a_ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=False):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.sp_model.EncodeAsPieces(UpperCamelCase_)
return pieces
def a_ ( self : Tuple , UpperCamelCase_ : Dict):
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase_)
def a_ ( self : Tuple , UpperCamelCase_ : List[Any]):
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : Any):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.sp_model.decode_pieces(UpperCamelCase_)
return out_string
def a_ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1]
return [1] + ([0] * len(UpperCamelCase_)) + [1]
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : Tuple = [self.sep_token_id]
__UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_):
logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase_))
return
__UpperCAmelCase : Union[str, Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_):
copyfile(self.vocab_file , UpperCamelCase_)
return (out_vocab_file,)
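# Minimal usage sketch (the checkpoint name is taken from the pretrained map
# above; the output shown is illustrative, not verified):
# tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
# ids = tokenizer.encode("Hello world")  # -> [cls_id, ..., sep_id]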
| 712 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a__ :
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def a_ ( cls : List[str] , UpperCamelCase_ : CommonSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray):
"""simple docstring"""
return cls(common=UpperCamelCase_ , init_noise_sigma=UpperCamelCase_ , timesteps=UpperCamelCase_)
@dataclass
class a__ ( __magic_name__ ):
lowercase_ = 42
class a__ ( __magic_name__ , __magic_name__ ):
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def a_ ( self : Optional[int]):
"""simple docstring"""
return True
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 1000 , UpperCamelCase_ : float = 0.0001 , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : str = "linear" , UpperCamelCase_ : Optional[jnp.ndarray] = None , UpperCamelCase_ : str = "fixed_small" , UpperCamelCase_ : bool = True , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = dtype
def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[CommonSchedulerState] = None):
"""simple docstring"""
if common is None:
__UpperCAmelCase : Tuple = CommonSchedulerState.create(self)
# standard deviation of the initial noise distribution
__UpperCAmelCase : Tuple = jnp.array(1.0 , dtype=self.dtype)
__UpperCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase_ , init_noise_sigma=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def a_ ( self : Optional[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[int] = None):
"""simple docstring"""
return sample
def a_ ( self : Any , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : int , UpperCamelCase_ : Tuple = ()):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : List[str] = (jnp.arange(0 , UpperCamelCase_) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def a_ ( self : Any , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None):
"""simple docstring"""
__UpperCAmelCase : List[str] = state.common.alphas_cumprod[t]
__UpperCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCAmelCase : Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__UpperCAmelCase : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__UpperCAmelCase : str = jnp.clip(UpperCamelCase_ , a_min=1e-20)
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__UpperCAmelCase : Optional[int] = jnp.log(jnp.clip(UpperCamelCase_ , a_min=1e-20))
elif variance_type == "fixed_large":
__UpperCAmelCase : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__UpperCAmelCase : str = jnp.log(state.common.betas[t])
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__UpperCAmelCase : Any = variance
__UpperCAmelCase : Union[str, Any] = state.common.betas[t]
__UpperCAmelCase : List[str] = (predicted_variance + 1) / 2
__UpperCAmelCase : int = frac * max_log + (1 - frac) * min_log
return variance
def a_ ( self : Optional[int] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : Optional[jax.random.KeyArray] = None , UpperCamelCase_ : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : Dict = timestep
if key is None:
__UpperCAmelCase : List[str] = jax.random.PRNGKey(0)
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__UpperCAmelCase , __UpperCAmelCase : int = jnp.split(UpperCamelCase_ , sample.shape[1] , axis=1)
else:
__UpperCAmelCase : List[str] = None
# 1. compute alphas, betas
__UpperCAmelCase : str = state.common.alphas_cumprod[t]
__UpperCAmelCase : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
__UpperCAmelCase : Tuple = 1 - alpha_prod_t
__UpperCAmelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCAmelCase : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler.")
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCAmelCase : List[Any] = jnp.clip(UpperCamelCase_ , -1 , 1)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__UpperCAmelCase : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCAmelCase : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__UpperCAmelCase : int = jax.random.split(UpperCamelCase_ , num=1)
__UpperCAmelCase : Any = jax.random.normal(UpperCamelCase_ , shape=model_output.shape , dtype=self.dtype)
return (self._get_variance(UpperCamelCase_ , UpperCamelCase_ , predicted_variance=UpperCamelCase_) ** 0.5) * noise
__UpperCAmelCase : Tuple = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
__UpperCAmelCase : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase_ , state=UpperCamelCase_)
def a_ ( self : Optional[Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : DDPMSchedulerState , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , UpperCamelCase_ : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def __len__( self : int):
"""simple docstring"""
return self.config.num_train_timesteps
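# Minimal usage sketch (values assumed): build the scheduler, create its
# immutable state, then select 50 inference timesteps.
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, 50)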
| 487 | 0 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
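# Doctest-style example (input assumed; result traced by hand against the
# implementation above):
# >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
# [10, 22, 33, 41, 60, 80]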
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    # Returns True if `pattern` occurs in `text`, using rolling hashes.
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 267 | 0 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
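
# Offline sanity check with a stubbed Open Library record (the field values
# below are made up for illustration; a real record comes from
# get_openlibrary_data):
def _demo_summarize_book() -> None:
    stub = {
        "title": "Example Book",
        "publish_date": "2001",
        "authors": [],
        "number_of_pages": 96,
        "first_sentence": {"value": "It was a dark and stormy night."},
        "isbn_10": ["0000000000"],
        "isbn_13": ["9780000000002"],
    }
    print(summarize_book(stub))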
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 704 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)

def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
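
# For orientation, a minimal round-trip using the same helpers these tests
# exercise. JsonDatasetReader/JsonDatasetWriter are internal to `datasets`;
# the public equivalents are `load_dataset("json", ...)` and `Dataset.to_json`.
# The column values below are made up to mirror the fixtures.
if __name__ == "__main__":
    demo = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
    JsonDatasetWriter(demo, "roundtrip.jsonl", lines=True).write()
    reloaded = JsonDatasetReader("roundtrip.jsonl").read()
    assert reloaded.column_names == demo.column_names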
| 156 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) | 90 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list: push, pop and peek are all O(1)."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Add an item to the top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the item at the top of the stack."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the item at the top of the stack without removing it."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
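
# Quick usage sketch (values assumed, mirroring the class above):
if __name__ == "__main__":
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2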
if __name__ == "__main__":
from doctest import testmod
testmod()
| 536 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)

TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
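
# Typical LED usage outside the test harness (standard transformers API; the
# checkpoint matches the integration tests above, the input text and the
# explicit decoder start are illustrative):
if __name__ == "__main__":
    from transformers import LEDTokenizer

    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
    inputs = tokenizer(["A very long document ..."], return_tensors="tf")
    # LED models take a global attention mask; global attention on the first
    # token is the usual choice for summarization-style tasks.
    global_attention_mask = tf.concat(
        [tf.ones_like(inputs["input_ids"][:, :1]), tf.zeros_like(inputs["input_ids"][:, 1:])],
        axis=-1,
    )
    decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]])
    outputs = model(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        decoder_input_ids=decoder_input_ids,
        global_attention_mask=global_attention_mask,
    )
    print(outputs.logits.shape)  # (batch, target_len, vocab_size)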
| 296 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _lowercase :
"""simple docstring"""
lowercase__ = LEDConfig
lowercase__ = {}
lowercase__ = '''gelu'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=20 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Tuple=4 , ) -> str:
'''simple docstring'''
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =eos_token_id
__UpperCamelCase =pad_token_id
__UpperCamelCase =bos_token_id
__UpperCamelCase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__UpperCamelCase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__UpperCamelCase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase =tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__UpperCamelCase =prepare_led_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tf.concat(
[tf.zeros_like(UpperCamelCase__ )[:, :-1], tf.ones_like(UpperCamelCase__ )[:, -1:]] , axis=-1 , )
__UpperCamelCase =global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =TFLEDModel(config=UpperCamelCase__ ).get_decoder()
__UpperCamelCase =inputs_dict['''input_ids''']
__UpperCamelCase =input_ids[:1, :]
__UpperCamelCase =inputs_dict['''attention_mask'''][:1, :]
__UpperCamelCase =1
# first forward pass
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__UpperCamelCase , __UpperCamelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCamelCase =tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCamelCase =tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCamelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowercase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowercase__ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =tf.zeros_like(inputs_dict['''attention_mask'''] )
__UpperCamelCase =2
__UpperCamelCase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__UpperCamelCase =True
__UpperCamelCase =self.model_tester.seq_length
__UpperCamelCase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase__ : Tuple ):
__UpperCamelCase =outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase__ : Dict ):
__UpperCamelCase =[t.numpy() for t in outputs.encoder_attentions]
__UpperCamelCase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCamelCase =len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase (__UpperCamelCase : str ):
"""simple docstring"""
return tf.constant(__UpperCamelCase , dtype=tf.intaa )
__lowercase = 1e-4
@slow
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, 768)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 , rtol=1E-3 )
| 296 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" ) | 642 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level Byte-Pair-Encoding tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
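# A minimal, self-contained sketch of the greedy byte-pair merge loop that the
# `bpe` method above implements. The merge table here is a toy stand-in, not
# the tokenizer's real vocabulary.
def _toy_get_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}


def toy_bpe(token, bpe_ranks):
    word = tuple(token)
    pairs = _toy_get_pairs(word)
    while pairs:
        # Always merge the lowest-ranked (most frequent) adjacent pair first.
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = _toy_get_pairs(word)
    return " ".join(word)


print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # low e r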
| 396 | 0 |
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the recurrence
    seed = (multiplier * seed + increment) % modulo."""

    def __init__(self, multiplier: int, increment: int, modulo: int, seed: int = int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self) -> int:
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
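# The class above implements the classic linear congruential recurrence
# x_{n+1} = (a * x_n + c) mod m; 1664525 / 1013904223 with m = 2**32 (used in
# the demo below) are the widely cited Numerical Recipes parameters. A quick
# standalone check of the same recurrence:
a, c, m = 1_664_525, 1_013_904_223, 2 << 31
x = 0
for _ in range(3):
    x = (a * x + c) % m
    print(x)  # the first value is (a*0 + c) % m == 1013904223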
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 140 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {"a": 1, "b": 2}
        sn6 = {"a": [1, 2], "b": [3, 4]}
        sn7 = {"a": {"1": 1}, "b": 2}
        sn8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {"a": 2, "b": 3}
        expected_map_nested_sn6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_sn7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_sn8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sn1), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8), expected_map_nested_sn8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, sn1, num_proc=num_proc), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2, num_proc=num_proc), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3, num_proc=num_proc), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4, num_proc=num_proc), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5, num_proc=num_proc), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6, num_proc=num_proc), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7, num_proc=num_proc), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8, num_proc=num_proc), expected_map_nested_sn8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
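# For intuition, `temp_seed` can be thought of as a context manager that saves
# the RNG state, seeds it, and restores it on exit. A NumPy-only sketch (the
# real `datasets` helper also covers torch/tensorflow; this toy version is an
# illustrative assumption, not its implementation):
from contextlib import contextmanager


@contextmanager
def toy_temp_seed(seed: int):
    state = np.random.get_state()  # save the global RNG state
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)  # restore it on exit


with toy_temp_seed(42):
    first = np.random.rand(3)
with toy_temp_seed(42):
    second = np.random.rand(3)
assert (first == second).all()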
| 140 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs I've run the model on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
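# A minimal, hypothetical use of `chunk_layer`: applying a cheap elementwise
# layer over a flattened batch in slices of 4 to bound peak memory. The layer
# and shapes are made up for illustration.
if __name__ == "__main__":
    x = torch.arange(16.0).view(16, 1)
    out = chunk_layer(lambda x: x * 2, {"x": x}, chunk_size=4, no_batch_dims=1)
    assert torch.equal(out, x * 2)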
| 344 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 344 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
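# Illustrative instantiation of the config defined above (the overridden
# values are chosen arbitrarily; unspecified fields keep their defaults):
if __name__ == "__main__":
    config = RetriBertConfig(hidden_size=256, num_hidden_layers=4)
    print(config.projection_dim)  # 128, the default
    print(config.share_encoders)  # True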
| 688 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 618 |
import math
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
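# The same quantity has a closed form via the standard identities
# sum_{i<=n} i = n(n+1)/2 and sum_{i<=n} i^2 = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == solution(10) == 2640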
if __name__ == "__main__":
print(F"""{solution() = }""")
| 540 | 0 |
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be abbreviated to string `b` by
    upper-casing some lowercase letters and deleting the remaining ones.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 435 |
def longest_common_substring(text1: str, text2: str) -> str:
    """
    >>> longest_common_substring("", "")
    ''
    >>> longest_common_substring("abcdef", "bcd")
    'bcd'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
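# `ans_index` tracks where the best run ends in `text1`, so the substring is
# recovered by slicing backwards by `ans_length`:
assert longest_common_substring("abcdef", "xabded") == "ab"
assert longest_common_substring("", "") == ""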
if __name__ == "__main__":
import doctest
doctest.testmod()
| 435 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 89 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 89 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature: str = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
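# Hypothetical composition of an image encoder with a text decoder; the
# checkpoint names are illustrative (and require network access), nothing in
# this module mandates them.
if __name__ == "__main__":
    encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
    decoder_config = AutoConfig.from_pretrained("gpt2")
    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True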
| 716 |
def hamming(n_element: int) -> list:
    """Returns a list of the first n_element Hamming numbers, i.e. numbers of
    the form 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
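# Sanity check: each new term is the smallest unused product of an earlier
# term with 2, 3, or 5, so the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10.
assert hamming(9) == [1, 2, 3, 4, 5, 6, 8, 9, 10]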
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(node.forward))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level: keep promoting while random() < p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        # Stores all nodes as they are located while searching for the node
        # with the given key.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
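# Small usage demo: keys come back sorted from __iter__, and the expected
# O(log n) search follows from the geometric level distribution (each node is
# promoted to the next level with probability p).
sl = SkipList()
for key in [3, 1, 4, 1, 5, 9, 2, 6]:
    sl.insert(key, key * key)
print(list(sl))   # [1, 2, 3, 4, 5, 6, 9] (duplicate key 1 was overridden)
print(sl.find(5))  # 25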
| 369 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
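# Hedged usage sketch (requires network access to the `google/rembert`
# checkpoint referenced in the maps above; shown only as an illustration):
if __name__ == "__main__":
    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    encoded = tokenizer("Hello world")
    print(encoded["input_ids"])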
| 369 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 720 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        # Move the randomly chosen pivot to the end of the slice.
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
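# Illustrative check (an editorial addition, not part of the original script):
# sorting a small list in place should order it; the helper also returns the
# number of comparisons performed.
_demo = [3, 1, 4, 1, 5]
_demo_count = _in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 1, 3, 4, 5]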
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
| 48 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
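# The mapping above drives transformers' lazy-import machinery: a listed
# submodule is only imported the first time one of its names is accessed.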
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 529 |
import string
def atbash_slow(sequence: str) -> str:
    '''simple docstring'''
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # Uppercase A-Z maps to Z-A: chr(65 + 90 - extract).
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # Lowercase a-z maps to z-a: chr(97 + 122 - extract).
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    '''simple docstring'''
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
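    # Round-trip check (an editorial addition): atbash is an involution, e.g.
    # atbash("Hello") == "Svool" and applying it twice restores the input.
    assert atbash(atbash("Hello")) == "Hello"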
| 529 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["aatype"].device , )
    protein_aatype = protein["aatype"].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["aatype"].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n , device=batch["aatype"].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
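# Context (editorial addition): atom14 is the compact per-residue atom layout
# (at most 14 heavy atoms for any amino acid), while atom37 is the fixed
# AlphaFold-style layout; the tensors built above translate indices and
# existence masks between the two representations.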
| 721 |
def ugly_numbers(n: int ) -> int:
    """simple docstring"""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
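    # Illustrative check (editorial addition): the first ten ugly numbers are
    # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.
    assert ugly_numbers(10) == 12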
| 379 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
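# As elsewhere in transformers, this base mapping is extended below depending
# on which optional backends (tokenizers, torch, tf) are importable.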
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 486 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class A ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__(self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , crop_size : Dict[str, int] = None , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale(self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
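# Usage sketch (editorial addition; names are illustrative): given a PIL image
# `img`, calling `A()(images=img, return_tensors="np")["pixel_values"]` yields
# a (1, 3, 224, 224) float array after resize, center crop, rescale, and
# normalization with the ImageNet default statistics.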
| 486 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
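# Illustrative behaviour (editorial addition): for input_ids [[5, 5, 0], [5, 0, 0]]
# with pad_token_id=0, the last column is all padding and is dropped, leaving
# [[5, 5], [5, 0]].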
class Seq2SeqDataset(Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ):
        return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('\n' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('\n' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right' )
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
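# Usage sketch (editorial addition; paths are hypothetical): the dataset reads
# line-aligned `<type_path>.source` / `<type_path>.target` files, and
# `collate_fn` is intended to be handed to a torch DataLoader, e.g.
# `DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)`.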
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List] ) -> List:
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path: str ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
    return list(map(f , x ) )
def pickle_save(obj , path ):
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    def remove_articles(text ):
        return re.sub(r'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str] ) -> Dict:
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
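# Worked example (editorial addition): normalize_answer("The Cat!") == "cat",
# so exact_match_score("The Cat!", "cat") is True and f1_score("The Cat!",
# "cat") evaluates to 1.0.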
| 581 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> Any:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
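        # Note (editorial addition): the tuple order above must match the
        # unpacking in prepare_config_and_inputs_for_common below.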
    def get_config( self ) -> Union[str, Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> str:
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> str:
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        outputs = model(input_ids )
        outputs = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        result = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Dict:
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Any:
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> int:
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> str:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Optional[int]:
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config( self ) -> Union[str, Any]:
        self.config_tester.run_common_tests()
    def test_xlm_model( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> Dict:
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> int:
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
    def test_model_from_pretrained( self ) -> Dict:
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_xlm_mlm_en_2048( self ) -> str:
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 581 | 1 |
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f"""1 / {pow(temp + 1 , int(power ) )}""" if series else '1' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
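# Example (editorial addition): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].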
| 70 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 8 , max_size=3_2 * 8 , num_labels=4 , hidden_dim=6_4 , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self) -> Tuple:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
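    # Note (editorial addition): pixel_mask flags valid (non-padded) pixels,
    # while mask_labels / class_labels hold per-instance binary masks and
    # their class ids, matching the inputs the segmentation head trains on.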
    def get_config( self) -> List[Any]:
        '''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 6_4
        config.dim_feedforward = 1_2_8
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self) -> Any:
        '''simple docstring'''
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states) , config.decoder_layers)
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False) -> Any:
        '''simple docstring'''
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            output = model(pixel_values , output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output , config)
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels) -> Tuple:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class MaskaFormerModelTest ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self) -> int:
        '''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False)
    def test_config( self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_maskaformer_model( self) -> Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model( self) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
    def test_forward_signature( self) -> Union[str, Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
@slow
    def test_model_from_pretrained( self) -> int:
        '''simple docstring'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_with_labels( self) -> Optional[int]:
        '''simple docstring'''
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device),
            'mask_labels': torch.randn((2, 1_0, *size) , device=torch_device),
            'class_labels': torch.zeros(2 , 1_0 , device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output( self) -> List[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True)
    def test_attention_outputs( self) -> str:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict , output_attentions=True)
self.assertTrue(outputs.attentions is not None)
    def test_training( self) -> Any:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self) -> Dict:
        '''simple docstring'''
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def model_checkpoints( self) -> Dict:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor( self) -> Union[str, Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head( self) -> List[str]:
        '''simple docstring'''
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE))
    def test_inference_universal_segmentation_head( self) -> Union[str, Any]:
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
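# Illustrative usage sketch (commented out; the checkpoint calls need a network
# connection): the processor's standard post-processing API turns the raw
# outputs exercised above into a per-pixel segmentation map.
#
#     processor = MaskaFormerImageProcessor.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance"
#     )
#     model = MaskaFormerForUniversalSegmentation.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance"
#     )
#     inputs = processor(prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     segmentation = processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[(480, 640)]
#     )[0]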
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
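# Usage sketch (illustrative, commented out since it downloads a tokenizer): the
# config and its ONNX export description plug together as below; the small
# config values are arbitrary.
#
#     from transformers import AutoTokenizer
#
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
#     onnx_config = CodeGenOnnxConfig(config, task="default", use_past=False)
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy keys: "input_ids", "attention_mask"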
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
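# Usage sketch (illustrative, hypothetical demo name): the filter expects a
# single-channel float image scaled to [0, 1]; a border of kernel_size // 2
# pixels is left untouched.
def _demo_bilateral_filter() -> np.ndarray:
    rng = np.random.default_rng(0)
    noisy = rng.random((32, 32)).astype("float32")
    return bilateral_filter(noisy, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)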
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
    print(positions)
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass bubbles the largest element to the end."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
    doctest.testmod()
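# Usage sketch (illustrative, hypothetical demo name): the sort is recursive and
# in-place; each pass bubbles the current maximum into its final slot.
def _demo_bubble_sort() -> None:
    assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
    assert bubble_sort([]) == []
    assert bubble_sort([1]) == [1]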
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS')
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)

        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ])
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : str , _lowerCAmelCase : int , _lowerCAmelCase : List[str]):
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase) for s in shape])}.npy"""
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Tuple=(4, 3, 5_1_2, 5_1_2) , _lowerCAmelCase : Optional[Any]=False):
'''simple docstring'''
__lowercase =torch.floataa if fpaa else torch.floataa
__lowercase =torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase))).to(_lowerCAmelCase).to(_lowerCAmelCase)
return image
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[int]="CompVis/stable-diffusion-v1-4" , _lowerCAmelCase : Union[str, Any]=False):
'''simple docstring'''
__lowercase ='fp16' if fpaa else None
__lowercase =torch.floataa if fpaa else torch.floataa
__lowercase =AutoencoderKL.from_pretrained(
_lowerCAmelCase , subfolder='vae' , torch_dtype=_lowerCAmelCase , revision=_lowerCAmelCase , )
model.to(_lowerCAmelCase).eval()
return model
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Dict=0):
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(_lowerCAmelCase)
return torch.Generator(device=_lowerCAmelCase).manual_seed(_lowerCAmelCase)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[4_7, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
])
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =self.get_sd_vae_model()
__lowercase =self.get_sd_image(_lowerCAmelCase)
__lowercase =self.get_generator(_lowerCAmelCase)
with torch.no_grad():
__lowercase =model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase).sample
assert sample.shape == image.shape
__lowercase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowercase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[4_7, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
])
@require_torch_gpu
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =self.get_sd_vae_model(fpaa=_lowerCAmelCase)
__lowercase =self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase)
__lowercase =self.get_generator(_lowerCAmelCase)
with torch.no_grad():
__lowercase =model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase).sample
assert sample.shape == image.shape
__lowercase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowercase =torch.tensor(_lowerCAmelCase)
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[4_7, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
])
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : int):
'''simple docstring'''
__lowercase =self.get_sd_vae_model()
__lowercase =self.get_sd_image(_lowerCAmelCase)
with torch.no_grad():
__lowercase =model(_lowerCAmelCase).sample
assert sample.shape == image.shape
__lowercase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__lowercase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[3_7, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
])
@require_torch_gpu
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_sd_vae_model()
__lowercase =self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 6_4, 6_4))
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
__lowercase =sample[-1, -2:, :2, -2:].flatten().cpu()
__lowercase =torch.tensor(_lowerCAmelCase)
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3)
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[1_6, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
])
@require_torch_gpu
def __lowerCamelCase ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : int):
'''simple docstring'''
__lowercase =self.get_sd_vae_model(fpaa=_lowerCAmelCase)
__lowercase =self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=_lowerCAmelCase)
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
__lowercase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__lowercase =torch.tensor(_lowerCAmelCase)
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5e-3)
@parameterized.expand([(1_3,), (1_6,), (2_7,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Dict):
'''simple docstring'''
__lowercase =self.get_sd_vae_model(fpaa=_lowerCAmelCase)
__lowercase =self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=_lowerCAmelCase)
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1e-1)
@parameterized.expand([(1_3,), (1_6,), (3_7,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =self.get_sd_vae_model()
__lowercase =self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 6_4, 6_4))
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__lowercase =model.decode(_lowerCAmelCase).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[4_7, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
])
def __lowerCamelCase ( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : int):
'''simple docstring'''
__lowercase =self.get_sd_vae_model()
__lowercase =self.get_sd_image(_lowerCAmelCase)
__lowercase =self.get_generator(_lowerCAmelCase)
with torch.no_grad():
__lowercase =model.encode(_lowerCAmelCase).latent_dist
__lowercase =dist.sample(generator=_lowerCAmelCase)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__lowercase =sample[0, -1, -3:, -3:].flatten().cpu()
__lowercase =torch.tensor(_lowerCAmelCase)
__lowercase =3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
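# Migration sketch (illustrative, commented out): checkpoints keep working, only
# the class changes; the two objects below are equivalent since the deprecated
# class is now a thin subclass of DeiTImageProcessor.
#
#     old = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")  # emits FutureWarning
#     new = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")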
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
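# Note (illustrative): with the `_LazyModule` indirection above, a statement like
#
#     from transformers.models.mgp_str import MgpstrConfig
#
# resolves the configuration submodule on first access, and the torch-backed
# modeling code is only imported if it is actually requested.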
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
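# Usage sketch (illustrative, hypothetical demo name): build the automaton once,
# then scan any number of strings; `search_in` maps each keyword to the start
# offsets of all of its occurrences.
def _demo_automaton() -> None:
    automaton = Automaton(["what", "hat", "ver", "er"])
    found = automaton.search_in("whatever, err ... , wherever")
    assert found == {"what": [0], "hat": [1], "ver": [5, 25], "er": [6, 10, 22, 26]}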
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_A : str =(720, 1_280) # Height, Width
_A : List[Any] =(0.4, 0.6) # if height or width lower than this scale, drop it.
_A : int =1 / 100
_A : List[str] =''''''
_A : Dict =''''''
_A : List[str] =''''''
_A : List[Any] =250
def SCREAMING_SNAKE_CASE_ () -> None:
lowerCamelCase__ : Union[str, Any] = get_dataset(UpperCamelCase , UpperCamelCase )
for index in range(UpperCamelCase ):
lowerCamelCase__ : List[str] = random.sample(range(len(UpperCamelCase ) ) , 4 )
lowerCamelCase__ : Any = update_image_and_anno(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , filter_scale=UpperCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCamelCase__ : Any = random_chars(32 )
lowerCamelCase__ : Tuple = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCamelCase__ : Tuple = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowerCamelCase__ : Dict = []
for anno in new_annos:
lowerCamelCase__ : List[str] = anno[3] - anno[1]
lowerCamelCase__ : Any = anno[4] - anno[2]
lowerCamelCase__ : Tuple = anno[1] + width / 2
lowerCamelCase__ : Optional[int] = anno[2] + height / 2
lowerCamelCase__ : Union[str, Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> tuple[list, list]:
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : Tuple = []
for label_file in glob.glob(os.path.join(UpperCamelCase , """*.txt""" ) ):
lowerCamelCase__ : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(UpperCamelCase ) as in_file:
lowerCamelCase__ : Union[str, Any] = in_file.readlines()
lowerCamelCase__ : Dict = os.path.join(UpperCamelCase , f'''{label_name}.jpg''' )
lowerCamelCase__ : List[Any] = []
for obj_list in obj_lists:
lowerCamelCase__ : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
lowerCamelCase__ : Dict = float(obj[1] ) - float(obj[3] ) / 2
lowerCamelCase__ : List[str] = float(obj[2] ) - float(obj[4] ) / 2
lowerCamelCase__ : Optional[Any] = float(obj[1] ) + float(obj[3] ) / 2
lowerCamelCase__ : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 0.0 , ) -> tuple[list, list, str]:
lowerCamelCase__ : str = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCamelCase__ : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCamelCase__ : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCamelCase__ : List[str] = int(scale_x * output_size[1] )
lowerCamelCase__ : Dict = int(scale_y * output_size[0] )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
for i, index in enumerate(UpperCamelCase ):
lowerCamelCase__ : Dict = all_img_list[index]
path_list.append(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = all_annos[index]
lowerCamelCase__ : List[str] = cva.imread(UpperCamelCase )
if i == 0: # top-left
lowerCamelCase__ : Tuple = cva.resize(UpperCamelCase , (divid_point_x, divid_point_y) )
lowerCamelCase__ : Tuple = img
for bbox in img_annos:
lowerCamelCase__ : Any = bbox[1] * scale_x
lowerCamelCase__ : Union[str, Any] = bbox[2] * scale_y
lowerCamelCase__ : Union[str, Any] = bbox[3] * scale_x
lowerCamelCase__ : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCamelCase__ : Dict = cva.resize(UpperCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
lowerCamelCase__ : Optional[Any] = img
for bbox in img_annos:
lowerCamelCase__ : Any = scale_x + bbox[1] * (1 - scale_x)
lowerCamelCase__ : Tuple = bbox[2] * scale_y
lowerCamelCase__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowerCamelCase__ : Optional[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCamelCase__ : Union[str, Any] = cva.resize(UpperCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
lowerCamelCase__ : str = img
for bbox in img_annos:
lowerCamelCase__ : str = bbox[1] * scale_x
lowerCamelCase__ : Any = scale_y + bbox[2] * (1 - scale_y)
lowerCamelCase__ : int = bbox[3] * scale_x
lowerCamelCase__ : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCamelCase__ : List[Any] = cva.resize(
UpperCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCamelCase__ : str = img
for bbox in img_annos:
lowerCamelCase__ : List[str] = scale_x + bbox[1] * (1 - scale_x)
lowerCamelCase__ : Any = scale_y + bbox[2] * (1 - scale_y)
lowerCamelCase__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowerCamelCase__ : Optional[Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCamelCase__ : Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
assert number_char > 1, "The number of character should greater than 1"
lowerCamelCase__ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_A : List[str] ='''examples/'''
_A : Any ={
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_A : int ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_A : int ='''README.md'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : List[str] = f.read()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = REPLACE_PATTERNS[pattern]
lowerCamelCase__ : Dict = replace.replace("""VERSION""" , UpperCamelCase )
lowerCamelCase__ : str = re_pattern.sub(UpperCamelCase , UpperCamelCase )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
for folder, directories, fnames in os.walk(UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , pattern="""examples""" )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> List[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if not patch:
update_version_in_examples(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
lowerCamelCase__ : Dict = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ : Dict = """1. Want to contribute a new model?"""
with open(UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ : int = f.readlines()
# Find the start of the list.
lowerCamelCase__ : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ : List[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(UpperCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ : int = f.read()
lowerCamelCase__ : Optional[Any] = REPLACE_PATTERNS["""init"""][0].search(UpperCamelCase ).groups()[0]
return packaging.version.parse(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase=False ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ : List[str] = default_version.base_version
elif patch:
lowerCamelCase__ : Any = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ : List[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ : Any = input(f'''Which version are you releasing? [{default_version}]''' )
if len(UpperCamelCase ) == 0:
lowerCamelCase__ : Optional[int] = default_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCamelCase , patch=UpperCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def SCREAMING_SNAKE_CASE_ () -> List[str]:
lowerCamelCase__ : Optional[int] = get_version()
lowerCamelCase__ : Any = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ : Any = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ : List[Any] = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCamelCase ) == 0:
lowerCamelCase__ : Dict = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_A : List[str] =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
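# Illustrative sketch (standalone, hypothetical helper name): how the "init"
# regex defined by this script rewrites a version line; only `re` is required.
def _demo_version_regex() -> str:
    import re

    pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
    sample = '__version__ = "4.26.0.dev0"\n'
    # in the real script the VERSION placeholder is filled in before substitution
    return pattern.sub('__version__ = "4.26.1"\n', sample)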
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
def __init__( self : List[str] , __snake_case : Tuple , __snake_case : int=3 , __snake_case : int=32 , __snake_case : List[Any]=3 , __snake_case : Tuple=10 , __snake_case : Optional[int]=[10, 20, 30, 40] , __snake_case : Optional[int]=[1, 1, 2, 1] , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : List[str]="relu" , __snake_case : List[Any]=3 , __snake_case : Tuple=None , ) -> int:
_a : str = parent
_a : str = batch_size
_a : int = image_size
_a : int = num_channels
_a : Optional[Any] = embeddings_size
_a : Optional[Any] = hidden_sizes
_a : List[str] = depths
_a : List[Any] = is_training
_a : Dict = use_labels
_a : Optional[Any] = hidden_act
_a : Optional[int] = num_labels
_a : Union[str, Any] = scope
_a : Tuple = len(__snake_case )
def snake_case_ ( self : Dict ) -> Tuple:
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_a : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : Union[str, Any] ) -> Dict:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def snake_case_ ( self : Optional[Any] , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict ) -> str:
_a : Tuple = RegNetModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Tuple = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case_ ( self : str , __snake_case : Any , __snake_case : Dict , __snake_case : Any ) -> str:
_a : int = self.num_labels
_a : int = RegNetForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_a : str = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Tuple ) -> Union[str, Any]:
_a : List[str] = self.prepare_config_and_inputs()
_a , _a , _a : Optional[Any] = config_and_inputs
_a : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : Tuple = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCAmelCase : List[Any] = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[Any] = False
def snake_case_ ( self : Tuple ) -> Dict:
_a : List[str] = RegNetModelTester(self )
_a : List[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def snake_case_ ( self : int ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self : Dict ) -> Union[str, Any]:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case_ ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case_ ( self : Optional[int] ) -> Dict:
pass
def snake_case_ ( self : Dict ) -> Tuple:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(__snake_case )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case_ ( self : Dict ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Tuple ) -> Optional[Any]:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(config=__snake_case )
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case_ ( self : Dict ) -> List[Any]:
def check_hidden_states_output(__snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] ):
_a : Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_a : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a : int = layer_type
_a : Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case_ ( self : List[str] ) -> Tuple:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def snake_case_ ( self : Optional[Any] ) -> int:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Optional[Any] = RegNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase_ ( ):
_a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : Any ) -> str:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self : Optional[int] ) -> Optional[int]:
_a : int = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__snake_case )
_a : int = self.default_image_processor
_a : Union[str, Any] = prepare_img()
_a : Tuple = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
_a : List[str] = model(**__snake_case )
# verify the logits
_a : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
_a : Tuple = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
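# Illustrative inference sketch (commented out; mirrors the integration test
# above using the public transformers API and a real hub checkpoint):
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     label = model.config.id2label[logits.argmax(-1).item()]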
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__UpperCAmelCase : List[str] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__UpperCAmelCase : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__UpperCAmelCase : Union[str, Any] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
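# Usage sketch (illustrative, hypothetical demo name): the helpers above work on
# plain numpy arrays, so the metric math can be sanity-checked directly.
def _demo_glue_helpers() -> dict:
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    return acc_and_f1(preds, labels)  # -> {'accuracy': 0.75, 'f1': 0.666...}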
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library), identical to
    `BertTokenizerFast`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model.
        """
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Finds the best answer spans for one passage. Returns non-overlapping spans as inclusive
        (start_index, end_index) pairs, best-scoring first.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library), identical to
    `BertTokenizerFast` plus the question/title/text packing logic of the mixin above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
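# --- Usage sketch (illustrative, not part of the original module). It follows the
# public DPR reader example; the checkpoint name "facebook/dpr-reader-single-nq-base"
# is the standard reader checkpoint, and outputs depend on the model used:
#
# from transformers import DPRReader
#
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions=["What is love ?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# outputs = model(**encoded_inputs)
# predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
# print(predicted_spans[0].text)  # best extracted answer span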
| 708 |
'''simple docstring'''
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant step: follow the line through (x_n, f(x_n)) and (x_n1, f(x_n1)) to zero
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # stop once successive iterates are close enough
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
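# Additional usage sketch (not in the original script): the same routine finds the
# positive root of x**2 - 2, i.e. an approximation of sqrt(2) ~ 1.41421:
# print(intersection(lambda x: x * x - 2, 1.0, 2.0))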
| 521 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at ``position`` of ``number``."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at ``position`` of ``number`` as 0 or 1."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
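# Usage sketch (illustrative values, added here for clarity; 0b1101 == 13):
# set_bit(0b1101, 1)    -> 0b1111 (15)
# clear_bit(0b1101, 2)  -> 0b1001 (9)
# flip_bit(0b1101, 0)   -> 0b1100 (12)
# is_bit_set(0b1101, 3) -> True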
| 313 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings with a strided convolution.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor of shape [B, C, *].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing: average pooling minus the input itself
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
POOLFORMER_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
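# --- Usage sketch (illustrative, not part of the original module). It assumes the
# public "sail/poolformer_s12" checkpoint and a PIL image bound to `image`:
#
# from transformers import AutoImageProcessor
#
# image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# inputs = image_processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])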
| 313 | 1 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n), i.e. the middle entry
    of row 2n of Pascal's triangle."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 454 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit from
    ``num`` (the sign is ignored)."""
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    # one copy of the digit list per position, each with that position removed
    num_transpositions = [list(num_str) for char in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 454 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) from a single job entry."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Tuple = get_job_time(args.workflow_run_id)
__lowerCamelCase : Optional[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
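# Usage sketch (the run id below is a placeholder, not a real workflow run):
#   python this_script.py --workflow_run_id 123456789
# prints each job of the run with its duration in minutes, longest first.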
| 216 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BARThez tokenizer, backed by HuggingFace's *tokenizers* library and a
    SentencePiece model. Adapted from CamembertTokenizer and BartTokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
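# --- Usage sketch (illustrative, not part of the original module), assuming the
# public "moussaKam/barthez" checkpoint:
#
# tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# encoded = tokenizer("Paris est la capitale de la France.", return_tensors="pt")
# print(tokenizer.convert_ids_to_tokens(encoded["input_ids"][0]))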
| 216 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_with_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_with_type_forbidden(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_type_fallback(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())
    def test_array2d(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_array2d_type_forbidden(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_array2d_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_array2d_try_type_fallback(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())
    @require_pil
    def test_optimize_list_casting_disabled_for_images(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
lowerCamelCase = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCamelCase , lowerCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
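# --- Minimal standalone sketch of the API under test (illustrative, mirrors the
# test above; "/tmp/demo.arrow" is an arbitrary path):
#
# from datasets.arrow_writer import ArrowWriter
#
# with ArrowWriter(path="/tmp/demo.arrow") as writer:
#     writer.write({"col_1": "foo", "col_2": 1})
#     writer.write({"col_1": "bar", "col_2": 2})
#     num_examples, num_bytes = writer.finalize()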
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
with pytest.raises(UpperCAmelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
lowerCamelCase , lowerCamelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
with pytest.raises(UpperCAmelCase__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
lowerCamelCase , lowerCamelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ , hash_salt="split_name" , check_duplicates=UpperCAmelCase__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
lowerCamelCase , lowerCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
lowerCamelCase = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
lowerCamelCase , lowerCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
lowerCamelCase = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
lowerCamelCase , lowerCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.BufferOutputStream()
lowerCamelCase = pa.schema(UpperCAmelCase__ ) if fields else None
with ArrowWriter(stream=UpperCAmelCase__ , schema=UpperCAmelCase__ , writer_batch_size=UpperCAmelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
lowerCamelCase , lowerCamelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCAmelCase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = pa.array(TypedSequence(UpperCAmelCase__ , optimized_int_type=UpperCAmelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCAmelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 484 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 484 | 1 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is a power of two (1, 2, 4, 8, ...)."""
    if number < 0:
        raise ValueError('number must not be negative')
    # a power of two has exactly one bit set, so n & (n - 1) clears it to 0
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
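# Examples (added for illustration): is_power_of_two(16) -> True,
# is_power_of_two(12) -> False, is_power_of_two(-1) -> ValueError.
# Note that 0 & (0 - 1) == 0, so is_power_of_two(0) also returns True.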
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
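# With the lazy module installed in sys.modules, an import such as
#   from transformers.models.nllb import NllbTokenizer
# resolves the attribute (and its sentencepiece dependency) only on first access.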
| 349 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
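
# Hedged usage sketch (function names as restored above); a 5x5 board is the
# smallest size for which an open knight tour exists:
#     board = open_knight_tour(5)
#     for row in board:
#         print(row)  # each cell holds the move number at which it was visited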
| 169 |
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
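
# Hedged usage sketch: constructing the config and checking the overall
# downsampling factor of the feature encoder (the product of the conv strides):
#     config = Data2VecAudioConfig(vocab_size=32)
#     assert config.inputs_to_logits_ratio == 5 * 2 * 2 * 2 * 2 * 2 * 2  # == 320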
| 169 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCamelCase = TypeVar("T")
UpperCamelCase = Union[List[T], Tuple[T, ...]]
UpperCamelCase = Union[T, List[T], Dict[str, T]]
UpperCamelCase = Union[str, bytes, os.PathLike]
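
# Hedged usage sketch: these aliases describe nested user-facing inputs, e.g. a
# split-to-files mapping (alias names as restored above):
#     data_files: NestedDataStructureLike[PathLike] = {"train": ["a.csv", "b.csv"]}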
| 66 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
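
# Hedged usage sketch: round-tripping the config through to_dict(); the nested
# backbone config is serialized as a plain dict.
#     config = UperNetConfig()
#     d = config.to_dict()
#     assert d["backbone_config"]["model_type"] == "resnet"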
| 66 | 1 |
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # restored name for the unnamed boolean class flag; assumed to be the cpu-offload switch
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 702 |
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"

_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 1 | 0 |
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
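
# Hedged CLI usage sketch (assumes this command is registered under the
# `transformers-cli` entry point, as in the upstream project):
#     transformers-cli download bert-base-uncased --cache-dir /tmp/models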
| 54 |
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
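
# Hedged CLI usage sketch (checkpoint and output paths are placeholders):
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc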
| 328 | 0 |
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Shuffles `data` in place by repeated random swaps and also returns it.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 448 |
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # restored name for the unnamed boolean class flag; assumed to be the xformers switch
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 448 | 1 |
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
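
# Hedged usage sketch (function name as restored above); the conversion factor
# is 10 ** (difference of the exponents in METRIC_CONVERSION):
#     length_conversion(4, "kilometer", "meter")  # -> 4000.0
#     length_conversion(1, "meter", "km")         # -> 0.001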
| 197 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
__a = 10
__a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
__a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(_A ) ),
} , features=_A , )
return dataset
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_A )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : Tuple = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "file.txt"
__a = FILE_CONTENT
with open(_A , "w" ) as f:
f.write(_A )
return filename
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
import bza
__a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
__a = bytes(_A , "utf-8" )
with bza.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
__a = bytes(_A , "utf-8" )
with gzip.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
__a = bytes(_A , "utf-8" )
with lza.frame.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(_A , "w" ) as archive:
archive.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import tarfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
import lzma
__a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
__a = bytes(_A , "utf-8" )
with lzma.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import zipfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
__a = bytes(_A , "utf-8" )
with zstd.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "file.xml"
__a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_A , "w" ) as f:
f.write(_A )
return filename
SCREAMING_SNAKE_CASE : Any = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
SCREAMING_SNAKE_CASE : Optional[int] = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
SCREAMING_SNAKE_CASE : Any = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : int = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
SCREAMING_SNAKE_CASE : List[Any] = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = datasets.Dataset.from_dict(_A )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(_A ) ) as con:
__a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_A , "w" , newline="" ) as f:
__a = csv.DictWriter(_A , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_A , "w" , newline="" ) as f:
__a = csv.DictWriter(_A , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import bza
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(_A , "rb" ) as f:
__a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_A , "wb" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(_A , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
__a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(_A , "wb" ) as f:
__a = pq.ParquetWriter(_A , schema=_A )
__a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_A ) )] for k in DATA[0]} , schema=_A )
writer.write_table(_A )
writer.close()
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA}
with open(_A , "w" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA_DICT_OF_LISTS}
with open(_A , "w" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_A , "w" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_A , "w" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_A , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_A , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_A ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_A , "rb" ) as orig_file:
with gzip.open(_A , "wb" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_A , "rb" ) as orig_file:
with gzip.open(_A , "wb" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("nested" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(_A , "w" ) as f:
f.add(_A , arcname=os.path.join("nested" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = ["0", "1", "2", "3"]
__a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(_A , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("main_dir" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename("unsupported.ext" ) )
f.write(_A , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __A ( ):
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __A ( _A , _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(_A , "w" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __A ( _A ):
"""simple docstring"""
__a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 197 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    # `images`: the denoised images; `nsfw_content_detected`: per-image safety flags.
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 416 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function: 0 if `number` has a squared prime factor, otherwise
    (-1) ** (number of prime factors)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
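
# Hedged usage sketch:
#     mobius(24)  # 24 = 2^3 * 3 has a squared prime factor -> 0
#     mobius(10)  # 10 = 2 * 5, two distinct primes         -> 1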
| 416 | 1 |
"""Image preprocessing utilities: resize shortest edge, normalize, and pad."""

import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
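
# Hedged usage sketch (`cfg` is a hypothetical Detectron2-style config object
# exposing the INPUT/MODEL fields read in __init__ above):
#     preprocess = Preprocess(cfg)
#     images, sizes, scales_yx = preprocess(["path/to/img.jpg"])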
| 683 |
"""Convert a BertModel PyTorch checkpoint to the original TensorFlow 1.x format."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
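
# Hedged CLI usage sketch (paths are placeholders):
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_ckpt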
| 653 | 0 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Filter the DataFrame down to one Spark partition at a time.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100

        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )

        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
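# --- Usage sketch (editor's addition, not part of the original module) ---
# This builder is normally driven through `datasets.Dataset.from_spark`
# (available in recent `datasets` releases); a minimal sketch, assuming a
# running SparkSession named `spark`:
#
#   from datasets import Dataset
#
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = Dataset.from_spark(df)  # materializes the DataFrame via the builder above
#
# The partition-ordered iterable can also be exercised directly:
#
#   for key, example in SparkExamplesIterable(df):
#       print(key, example)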
| 715 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly: returns static dimensions where known and
    dynamic ones (as scalar tensors) where not.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """
    Stable wrapper around `tf.nn.softmax` that works reliably with XLA on CPU; adding a tiny
    epsilon is safe because softmax(x) == softmax(x + c).
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.

    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """
    Invert an attention mask (e.g., switches 0. and 1.) and scale it for additive use in attention scores.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """
    `tf.gather`, on which TF embedding layers are based, won't check positive out-of-bound indices on GPU, returning
    zeros instead. This function adds a check against that dangerous silent behavior.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group, chunking the array when it would
    otherwise exceed the HDF5 object-header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64_512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group, reassembling chunked attributes."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
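# --- Demo (editor's addition, not part of the original module): a quick
# sanity check of the shape helpers above on a toy tensor. ---
if __name__ == "__main__":
    x = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))
    print(shape_list(x))                  # [2, 3, 4]
    print(flatten(x, start_dim=1).shape)  # (2, 12), matching torch.flatten(x, 1)
    probs = stable_softmax(tf.zeros((2, 4)), axis=-1)
    print(probs.numpy())                  # uniform rows, each summing to 1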
| 416 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
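# --- Usage sketch (editor's addition, not part of the original tests): the
# same checkpoint used outside the test harness; requires network access to
# download the pretrained weights. ---
#
#   from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder
#
#   tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
#   embedding = model(**inputs).pooler_output  # shape (1, 768)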
| 280 |
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,  # the long dict literal above keeps its original binding name
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
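# --- Usage sketch (editor's addition, not part of the original tests): the
# special-token layout checked by test_sequence_builders, shown standalone;
# it assumes the same SentencePiece fixture used above. ---
#
#   tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)
#   single = tokenizer.build_inputs_with_special_tokens([5, 6])        # [CLS] 5 6 [SEP]
#   pair = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # [CLS] 5 6 [SEP] 7 8 [SEP]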
| 280 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
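# --- Demo (editor's addition, not part of the original module): the default
# CvT-13-style configuration and a single overridden field. ---
if __name__ == "__main__":
    config = CvtConfig()
    print(config.model_type, config.embed_dim)  # cvt [64, 192, 384]
    tiny = CvtConfig(depth=[1, 1, 2])           # shallower variant; everything else stays default
    print(tiny.depth)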
| 656 | 0 |