| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""
Project Euler Problem 100: https://projecteuler.net/problem=100

If a box contains twenty-one coloured discs, fifteen blue and six red, the
probability of taking two blue discs at random is exactly 1/2. Find the number
of blue discs in the first arrangement whose total number of discs exceeds
min_total (10**12 in the problem statement).
"""
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs for the first arrangement containing more
    than `min_total` discs in total.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
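    # Sanity checks for the recurrence above: the smallest arrangements with
    # P(two blue) = b/n * (b - 1)/(n - 1) = 1/2 are (blue, total) = (15, 21)
    # and (85, 120), so asking for more than 4 or 21 total discs should yield
    # 15 and 85 blue discs respectively.
    assert solution(4) == 15
    assert solution(21) == 85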
| 702 |
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro–Winkler similarity of two strings; the result lies between
    0.0 (no similarity) and 1.0 (identical strings).
    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("test", "test")
    1.0
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
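    # Worked example (values verified by hand): for "hello" vs "world" only "l"
    # matches inside the search window, so m = 1, t = 0 and
    # jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666...; there is no common prefix, so
    # the Winkler bonus is zero and the final score is unchanged.
    assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-12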
| 662 | 0 |
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 703 |
"""
Project Euler Problem 30: https://projecteuler.net/problem=30

Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
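    # Why the search stops below 10**6: even a seven-digit number of all 9s has
    # digit fifth-power sum 7 * 9**5 = 413343, which has only six digits, so no
    # number with seven or more digits can equal its own digit fifth-power sum.
    # The qualifying numbers are 4150, 4151, 54748, 92727, 93084 and 194979:
    assert solution() == 4150 + 4151 + 54748 + 92727 + 93084 + 194979  # 443839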
| 662 | 0 |
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 704 |
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 662 | 0 |
"""Google BLEU (aka GLEU) metric."""

from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
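
# The metric class above is a thin wrapper around NLTK's `gleu_score.corpus_gleu`
# (already imported at the top of this module); calling the scorer directly
# returns the same value. A minimal sketch, assuming `nltk` is installed:
#
#     hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
#     references = [["the", "cat", "sat", "on", "the", "mat"]]
#     score = gleu_score.corpus_gleu(list_of_references=[references], hypotheses=[hypothesis])
#     assert score == 1.0  # identical hypothesis and reference give a perfect score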
| 705 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place with the bidirectional cocktail shaker sort and
    returns it.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: drag the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: push the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
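    # Like bubble sort the algorithm is O(n^2) in the worst case, but the
    # `swapped` flag lets it finish in a single O(n) sweep over input that is
    # already sorted. The sort is in-place and also returns the list:
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]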
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
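
# With the lazy module in place, importing a symbol such as `CLIPModel` only
# triggers the import of `modeling_clip` (and hence torch) on first attribute
# access. A minimal usage sketch against the standard public checkpoint:
#
#     from transformers import CLIPModel, CLIPProcessor
#     model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")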
| 706 |
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays the progress bar only
    on the local main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
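
# Usage sketch under an `accelerate` multi-process launch: note that
# `main_process_only` is the first positional parameter, so the iterable must
# come after it (or everything can be passed by keyword).
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(True, dataloader, desc="training"):
#         ...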
| 707 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
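
# For illustration, a mapping the script would reorder (hypothetical entries):
#
#     before sorting                       after sorting
#         ("roberta", "RobertaConfig"),        ("albert", "AlbertConfig"),
#         ("albert", "AlbertConfig"),          ("roberta", "RobertaConfig"),
#
# Each block is keyed on the first quoted identifier matched by `_re_identifier`,
# so multi-line entries wrapped in parentheses are sorted as single units.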
| 662 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=str )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
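# Standalone sketch of the decode strategy in convert_tokens_to_string above:
# sentencepiece only ever sees runs of ordinary pieces, and special tokens are
# spliced back in verbatim. The .replace() call stands in for sp_model.decode,
# and the token strings are illustrative:
_special_demo = {"</s>", "<unk>"}
_out_demo, _run_demo = "", []
for _tok in ["▁Hel", "lo", "</s>", "▁Bye"]:
    if _tok in _special_demo:
        _out_demo += "".join(_run_demo).replace("▁", " ") + _tok
        _run_demo = []
    else:
        _run_demo.append(_tok)
_out_demo += "".join(_run_demo).replace("▁", " ")
assert _out_demo.strip() == "Hello</s> Bye"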
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=str )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__lowerCAmelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase : int = logging.getLogger()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser()
parser.add_argument("-f" )
a = parser.parse_args()
return args.f
def __magic_name__ ( A : Union[str, Any], A : Union[str, Any]="eval" ):
'''simple docstring'''
a = os.path.join(A, F"""{split}_results.json""" )
if os.path.exists(A ):
with open(A, "r" ) as f:
return json.load(A )
raise ValueError(F"""can't find {path}""" )
__lowerCAmelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_flax_glue.main()
a = get_results(__lowerCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_clm_flax.main()
a = get_results(__lowerCamelCase )
self.assertLess(result["eval_perplexity"] , 1_00 )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_summarization_flax.main()
a = get_results(__lowerCamelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_mlm_flax.main()
a = get_results(__lowerCamelCase )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_ta_mlm_flax.main()
a = get_results(__lowerCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def __UpperCAmelCase ( self : str ) -> List[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
a = 7 if get_gpu_count() > 1 else 2
a = self.get_auto_remove_tmp_dir()
a = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_flax_ner.main()
a = get_results(__lowerCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = self.get_auto_remove_tmp_dir()
a = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
run_qa.main()
a = get_results(__lowerCamelCase )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 709 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(i ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
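# A compact standalone sketch of the same safety test the class above performs
# (the tables here are hypothetical; need = maximum claim - allocation):
import numpy as np
_avail = np.array([3, 3, 2])
_alloc = np.array([[2, 0, 0], [1, 1, 0], [0, 0, 2]])
_need = np.array([[1, 2, 2], [0, 1, 1], [3, 1, 1]])
_done = [False] * 3
for _ in range(3):
    for _i in range(3):
        if not _done[_i] and (_need[_i] <= _avail).all():
            _avail = _avail + _alloc[_i]  # process _i can finish; release its resources
            _done[_i] = True
assert all(_done)  # this sample system is in a safe state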
| 662 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __magic_name__ ( ):
'''simple docstring'''
a = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]" )
a = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(A )
DownloadCommand.register_subcommand(A )
EnvironmentCommand.register_subcommand(A )
RunCommand.register_subcommand(A )
ServeCommand.register_subcommand(A )
UserCommands.register_subcommand(A )
AddNewModelCommand.register_subcommand(A )
AddNewModelLikeCommand.register_subcommand(A )
LfsCommands.register_subcommand(A )
PTtoTFCommand.register_subcommand(A )
# Let's go
a = parser.parse_args()
if not hasattr(A, "func" ):
parser.print_help()
exit(1 )
# Run
a = args.func(A )
service.run()
if __name__ == "__main__":
main()
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
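# Hedged usage sketch of the public API these helpers back (kept as comments,
# since importing `datasets` from inside this module would be circular):
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#   d1 = Dataset.from_dict({"x": [0, 1]})
#   d2 = Dataset.from_dict({"x": [10, 11]})
#   cat = concatenate_datasets([d1, d2])  # 4 rows, d1 then d2
#   mix = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=0,
#                             stopping_strategy="all_exhausted")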
| 662 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase : Union[str, Any] = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
__lowerCAmelCase : List[str] = '▁'
# Segments (not really needed)
__lowerCAmelCase : Any = 0
__lowerCAmelCase : Union[str, Any] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Optional[Any] = 3
__lowerCAmelCase : List[str] = 4
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """left"""
SCREAMING_SNAKE_CASE_ : Tuple = XLNetTokenizer
def __init__( self : Optional[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Tuple="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : Any="<sep>" , __lowerCamelCase : Optional[int]="<pad>" , __lowerCamelCase : List[str]="<cls>" , __lowerCamelCase : Optional[int]="<mask>" , __lowerCamelCase : Union[str, Any]=["<eop>", "<eod>"] , **__lowerCamelCase : Any , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
a = 3
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
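# XLNet appends the separators at the *end* of the sequence (BERT-style
# tokenizers prepend [CLS] instead); pure-python sketch of the pair layout
# built above, with illustrative token ids:
_ids_a, _ids_b, _sep, _cls = [11, 12], [21], [4], [3]
assert _ids_a + _sep + _ids_b + _sep + _cls == [11, 12, 4, 21, 4, 3]  # A </s> B </s> <cls>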
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
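# Sketch of the special-tokens mask built above for a sequence pair (pure
# python, illustrative lengths): 1 marks [CLS]/[SEP] positions, 0 marks
# ordinary sequence tokens.
_len_a, _len_b = 3, 2
assert [1] + [0] * _len_a + [1] + [0] * _len_b + [1] == [1, 0, 0, 0, 1, 0, 0, 1]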
| 662 | 0 |
import torch
def __magic_name__ ( ):
'''simple docstring'''
if torch.cuda.is_available():
a = torch.cuda.device_count()
else:
a = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(examples : Tuple ):
    return A(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
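# Worked sketch of the chunking above (illustrative numbers): with
# max_length = 4 and 10 concatenated tokens, total_length is truncated to
# (10 // 4) * 4 = 8, yielding two chunks [t0..t3] and [t4..t7]; t8 and t9
# are dropped.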
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 662 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
__lowerCAmelCase : List[Any] = 637_8137.0
__lowerCAmelCase : List[str] = 635_6752.31_4245
__lowerCAmelCase : Optional[int] = 637_8137
def __magic_name__ ( A : float, A : float, A : float, A : float ):
'''simple docstring'''
a = (AXIS_A - AXIS_B) / AXIS_A
a = atan((1 - flattening) * tan(radians(A ) ) )
a = atan((1 - flattening) * tan(radians(A ) ) )
a = radians(A )
a = radians(A )
# Equation
a = sin((phi_a - phi_a) / 2 )
a = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
a = sqrt(sin_sq_phi + (cos(A ) * cos(A ) * sin_sq_lambda) )
return 2 * RADIUS * asin(A )
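# In formula form (writing b1, b2 for the two reduced latitudes and l1, l2 for
# the longitudes), the function above is the haversine on an auxiliary sphere:
#   b = atan((1 - f) * tan(phi)),  f = (AXIS_A - AXIS_B) / AXIS_A
#   d = 2 * R * asin(sqrt(sin((b2 - b1) / 2) ** 2
#                         + cos(b1) * cos(b2) * sin((l2 - l1) / 2) ** 2))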
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 0 |
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] ) -> List[str]:
a = ""
a = ""
a = []
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
a = self.__min_dist_top_down_dp(__lowerCamelCase , n - 1 )
a = self.__min_dist_top_down_dp(m - 1 , __lowerCamelCase )
a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
a = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return self.dp[m][n]
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str ) -> int:
a = worda
a = worda
a = [[-1 for _ in range(len(__lowerCamelCase ) )] for _ in range(len(__lowerCamelCase ) )]
return self.__min_dist_top_down_dp(len(__lowerCamelCase ) - 1 , len(__lowerCamelCase ) - 1 )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : str ) -> int:
a = worda
a = worda
a = len(__lowerCamelCase )
a = len(__lowerCamelCase )
a = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
a = j
elif j == 0: # second string is empty
a = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
a = self.dp[i - 1][j - 1]
else:
a = self.dp[i][j - 1]
a = self.dp[i - 1][j]
a = self.dp[i - 1][j - 1]
a = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return self.dp[m][n]
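# Worked check of the recurrence implemented above: "cat" -> "cut" is a single
# substitution, so the distance is 1. A standalone memoized version:
import functools
@functools.lru_cache(maxsize=None)
def _edit_distance_demo(a: str, b: str) -> int:
    if not a:
        return len(b)
    if not b:
        return len(a)
    if a[-1] == b[-1]:
        return _edit_distance_demo(a[:-1], b[:-1])
    return 1 + min(
        _edit_distance_demo(a, b[:-1]),  # insert
        _edit_distance_demo(a[:-1], b),  # delete
        _edit_distance_demo(a[:-1], b[:-1]),  # replace
    )
assert _edit_distance_demo("cat", "cut") == 1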
if __name__ == "__main__":
__lowerCAmelCase : str = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
__lowerCAmelCase : Optional[Any] = input('Enter the first string: ').strip()
__lowerCAmelCase : Optional[int] = input('Enter the second string: ').strip()
print()
print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__lowerCAmelCase : Dict = 'src/transformers'
__lowerCAmelCase : Optional[Any] = 'docs/source/en/tasks'
def __magic_name__ ( A : str, A : Dict, A : str ):
'''simple docstring'''
with open(A, "r", encoding="utf-8", newline="\n" ) as f:
a = f.readlines()
# Find the start prompt.
a = 0
while not lines[start_index].startswith(A ):
start_index += 1
start_index += 1
a = start_index
while not lines[end_index].startswith(A ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Any = direct_transformers_import(TRANSFORMERS_PATH)
__lowerCAmelCase : int = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__lowerCAmelCase : Dict = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __magic_name__ ( A : str ):
'''simple docstring'''
a = TASK_GUIDE_TO_MODELS[task_guide]
a = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A, set() )
a = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __magic_name__ ( A : Optional[int], A : Optional[int]=False ):
'''simple docstring'''
a , a , a , a = _find_text_in_file(
filename=os.path.join(A, A ), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
a = get_model_list_for_task(A )
if current_list != new_list:
if overwrite:
with open(os.path.join(A, A ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase : Tuple = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ) -> None:
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
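    # With the toy vocab/merges above, "lower newer" splits as
    # l o w er | Ġ n e w er : only the "e r" merge applies inside "lower" (the
    # Ġl/Ġlo/Ġlow merges need the space-prefixed form), and "Ġ" stays a separate
    # piece in " newer" because no "Ġ n" merge exists.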
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the
# arguments `add_prefix_space` and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = PriorTransformer
SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states"""
@property
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
a = 4
a = 8
a = 7
a = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> Dict:
torch.manual_seed(__lowerCamelCase )
a = 4
a = 8
a = 7
a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
return (4, 8)
@property
def __UpperCAmelCase ( self : str ) -> str:
return (4, 8)
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
a = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a , a = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__lowerCamelCase )
a = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def __UpperCAmelCase ( self : Dict ) -> Dict:
a , a = self.prepare_init_args_and_inputs_for_common()
a = self.model_class(**__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[Any]:
a = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
a = model.to(__lowerCamelCase )
if hasattr(__lowerCamelCase , "set_default_attn_processor" ):
model.set_default_attn_processor()
a = self.get_dummy_seed_input()
with torch.no_grad():
a = model(**__lowerCamelCase )[0]
a = output[0, :5].flatten().cpu()
print(__lowerCamelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-2 ) )
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str , __lowerCamelCase : str=1 , __lowerCamelCase : Tuple=7_68 , __lowerCamelCase : List[Any]=77 , __lowerCamelCase : Optional[Any]=0 ) -> Optional[Any]:
torch.manual_seed(__lowerCamelCase )
a = batch_size
a = embedding_dim
a = num_embeddings
a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> Tuple:
a = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(__lowerCamelCase )
a = self.get_dummy_seed_input(seed=__lowerCamelCase )
with torch.no_grad():
a = model(**__lowerCamelCase )[0]
assert list(sample.shape ) == [1, 7_68]
a = sample[0, :8].flatten().cpu()
print(__lowerCamelCase )
a = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __magic_name__ ( A : str ):
'''simple docstring'''
a = "huggingface/label-files"
a = "imagenet-1k-id2label.json"
a = json.load(open(hf_hub_download(A, A, repo_type="dataset" ), "r" ) )
a = {int(A ): v for k, v in idalabel.items()}
a = {v: k for k, v in idalabel.items()}
a = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
a = BitConfig(
conv_layer=A, num_labels=1000, idalabel=A, labelaid=A, )
return config
def __magic_name__ ( A : Tuple ):
'''simple docstring'''
if "stem.conv" in name:
a = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
a = name.replace("blocks", "layers" )
if "head.fc" in name:
a = name.replace("head.fc", "classifier.1" )
if name.startswith("norm" ):
a = "bit." + name
if "bit" not in name and "classifier" not in name:
a = "bit.encoder." + name
return name
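# Illustrative mapping (not part of the original record), e.g.:
# rename_key("stem.conv.weight") -> "bit.embedder.convolution.weight"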
def __magic_name__ ( ):
'''simple docstring'''
a = "http://images.cocodataset.org/val2017/000000039769.jpg"
a = Image.open(requests.get(A, stream=A ).raw )
return im
@torch.no_grad()
def __magic_name__ ( A : Optional[int], A : int, A : int=False ):
'''simple docstring'''
a = get_config(A )
# load original model from timm
a = create_model(A, pretrained=A )
timm_model.eval()
# load state_dict of original model
a = timm_model.state_dict()
for key in state_dict.copy().keys():
a = state_dict.pop(A )
a = val.squeeze() if "head" in key else val
# load HuggingFace model
a = BitForImageClassification(A )
model.eval()
model.load_state_dict(A )
# create image processor
a = create_transform(**resolve_data_config({}, model=A ) )
a = transform.transforms
a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
a = BitImageProcessor(
do_resize=A, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=A, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=A, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
a = prepare_img()
a = transform(A ).unsqueeze(0 )
a = processor(A, return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(A, A )
# verify logits
with torch.no_grad():
a = model(A )
a = outputs.logits
print("Logits:", logits[0, :3] )
print("Predicted class:", model.config.idalabel[logits.argmax(-1 ).item()] )
a = timm_model(A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A, outputs.logits, atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__lowerCAmelCase : int = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
def __magic_name__ ( A : list[list[int]], A : int, A : int, A : list[int] ):
'''simple docstring'''
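# 1. Validate that the current vertex and the next vertex are connected by an edge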
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __magic_name__ ( A : list[list[int]], A : list[int], A : int ):
'''simple docstring'''
if curr_ind == len(A ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0, len(A ) ):
if valid_connection(A, A, A, A ):
# Insert current vertex into path as next transition
a = next_ver
# Validate created path
if util_hamilton_cycle(A, A, curr_ind + 1 ):
return True
# Backtrack
a = -1
return False
def __magic_name__ ( A : list[list[int]], A : int = 0 ):
'''simple docstring'''
a = [-1] * (len(A ) + 1)
# initialize start and end of path with starting index
a = a = start_index
# evaluate: if a Hamiltonian cycle is found return the path, otherwise return an empty list
return path if util_hamilton_cycle(A, A, 1 ) else []
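# Illustrative cross-check (assumed, not part of the original record): a
# brute-force Hamiltonian-cycle search over a small graph, handy for
# validating backtracking implementations such as `hamilton_cycle` above.
from itertools import permutations
def _brute_force_hamilton_cycle(graph: list[list[int]]) -> list[int]:
    num_vertices = len(graph)
    for perm in permutations(range(1, num_vertices)):
        candidate = [0, *perm, 0]
        # every consecutive pair of vertices in the candidate cycle must share an edge
        if all(graph[u][v] == 1 for u, v in zip(candidate, candidate[1:])):
            return candidate
    return []
if __name__ == "__main__":
    _demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    assert _brute_force_hamilton_cycle(_demo_graph) == [0, 1, 2, 4, 3, 0]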
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
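# Illustrative sketch (assumed, not part of the original record): the same
# [sin, cos] layout computed directly with jnp for four timesteps and
# embedding_dim = 16 (freq_shift = 0 for simplicity), to show the output shape.
if __name__ == "__main__":
    _t = jnp.arange(4, dtype=jnp.float32)
    _half = 8  # embedding_dim // 2
    _freqs = jnp.exp(jnp.arange(_half) * -(math.log(1.0e4) / _half))
    _args = _t[:, None] * _freqs[None, :]
    _emb = jnp.concatenate([jnp.sin(_args), jnp.cos(_args)], axis=1)
    assert _emb.shape == (4, 16)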
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 3.0
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Tuple:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=__lowerCamelCase ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# `GradScalerKwargs` values should be forwarded to the GradScaler the accelerator creates.
a = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
a = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , __lowerCamelCase )
@require_multi_gpu
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__lowerCAmelCase : Any = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__lowerCAmelCase : str = Accelerator(kwargs_handlers=[ddp_scaler])
__lowerCAmelCase : Dict = torch.nn.Linear(100, 200)
__lowerCAmelCase : Optional[int] = accelerator.prepare(model)
# Check the values changed in kwargs
__lowerCAmelCase : Optional[Any] = ''
__lowerCAmelCase : List[Any] = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
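# Illustrative example (assumed filename layout):
# extract_label("images/great_pyrenees_123.jpg") -> "great_pyrenees"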
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : int = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
from __future__ import annotations
import math
def __magic_name__ ( A : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(A ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCAmelCase : Any = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def __magic_name__ ( A : int ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
a = []
for num in range(len(A ) ):
a = 0
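# Goldbach's other conjecture: try to express the odd composite as prime + 2 * i * i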
while 2 * i * i <= odd_composites[num]:
a = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __magic_name__ ( ):
'''simple docstring'''
return compute_nums(1 )[0]
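# The well-known counterexample found by this search is 5777 (= 53 * 109),
# the smallest odd composite not expressible as a prime plus twice a square.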
if __name__ == "__main__":
print(F'''{solution() = }''')
| 701 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
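# square-and-multiply: recurse on half the exponent, then square the result modulo modulo_value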
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
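# Illustrative cross-check (assumed, not part of the original record): the
# same last-8-digits computation expressed with Python's built-in
# three-argument pow, which performs the identical modular square-and-multiply.
def _solution_with_builtin_pow(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result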
if __name__ == "__main__":
print(F'''{solution() = }''')
| 662 | 0 |
'''simple docstring'''
import random
def __magic_name__ ( A : int ):
'''simple docstring'''
a = num - 1
a = 0
while s % 2 == 0:
a = s // 2
t += 1
for _ in range(5 ):
a = random.randrange(2, num - 1 )
a = pow(A, A, A )
if v != 1:
a = 0
while v != (num - 1):
if i == t - 1:
return False
else:
a = i + 1
a = (v**2) % num
return True
def __magic_name__ ( A : int ):
'''simple docstring'''
if num < 2:
return False
a = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A )
def __magic_name__ ( A : int = 1024 ):
'''simple docstring'''
while True:
a = random.randrange(2 ** (keysize - 1), 2 ** (keysize) )
if is_prime_low_num(A ):
return num
if __name__ == "__main__":
__lowerCAmelCase : Any = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 702 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
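# a character counts as matched if the same character occurs within `limit` positions in the other string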
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
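# Worked example (hand-checked): jaro_winkler("hello", "world") matches one
# character ("l"), with no transpositions and no common prefix, giving
# (1 / 3) * (1 / 5 + 1 / 5 + 1 / 1) ~= 0.4667.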
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 662 | 0 |
import copy
import re
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = """hp"""
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Dict = None
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Dict:
a = prefix
a = defaults
cls.build_naming_info()
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
if len(__lowerCamelCase ) == 0:
return ""
a = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowerCamelCase ) + 1 ):
a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__lowerCamelCase : Union[str, Any] ):
a = ""
while integer != 0:
a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
a = 0
while True:
a = word + "#" + int_to_alphabetic(__lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
a = sword
break
a = short_word
a = word
return short_word
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) -> List[Any]:
a = param_name.split("_" )
a = [TrialShortNamer.shortname_for_word(__lowerCamelCase , __lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
a = ["", "_"]
for separator in separators:
a = separator.join(__lowerCamelCase )
if shortname not in info["reverse_short_param"]:
a = shortname
a = param_name
return shortname
return param_name
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str ) -> Optional[int]:
a = TrialShortNamer.shortname_for_key(__lowerCamelCase , __lowerCamelCase )
a = short_name
a = param_name
@classmethod
def __UpperCAmelCase ( cls : str ) -> Union[str, Any]:
if cls.NAMING_INFO is not None:
return
a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowerCamelCase , __lowerCamelCase )
a = info
@classmethod
def __UpperCAmelCase ( cls : List[str] , __lowerCamelCase : Any ) -> Any:
cls.build_naming_info()
assert cls.PREFIX is not None
a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
a = cls.NAMING_INFO["short_param"][k]
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = 1 if v else 0
a = "" if isinstance(__lowerCamelCase , (int, float) ) else "-"
a = f"""{key}{sep}{v}"""
name.append(__lowerCamelCase )
return "_".join(__lowerCamelCase )
@classmethod
def __UpperCAmelCase ( cls : Dict , __lowerCamelCase : Optional[Any] ) -> Tuple:
a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
a = []
else:
a = repr.split("_" )
a = {}
for value in values:
if "-" in value:
a , a = value.split("-" )
else:
a = re.sub("[0-9.]" , "" , __lowerCamelCase )
a = float(re.sub("[^0-9.]" , "" , __lowerCamelCase ) )
a = cls.NAMING_INFO["reverse_short_param"][p_k]
a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
a = cls.DEFAULTS[k]
return parameters
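# Illustrative usage (assumed method names from the conventional, un-mangled
# version of this class; not part of the original record):
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
#     RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 16})  # -> "run_bs16"
#     RunNamer.parse_repr("run_bs16")  # -> {"learning_rate": 0.001, "batch_size": 16.0}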
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
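# Worked example: 4150 = 4**5 + 1**5 + 5**5 + 0**5, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().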
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[int] = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
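The _LazyModule indirection above delays the heavy framework imports until a symbol is actually touched. A stripped-down sketch of the same idea using a module-level __getattr__ (PEP 562) rather than transformers' helper; the submodule and class names are placeholders:

# lazy_pkg/__init__.py -- PEP 562 lazy imports (placeholder names)
import importlib

_import_structure = {"tokenization": ["MyTokenizer"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")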
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
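The get_expected_values helper above reproduces the processor's shortest-edge rule: scale the image so its shorter side matches the target while preserving aspect ratio (with 480x640 inputs and a target of 800, that is exactly the 800x1066 shape asserted in the slow tests). The same computation in isolation:

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple:
    """Scale (height, width) so the shorter side equals `shortest_edge`."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_size(480, 640, 800))  # (800, 1066)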
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCAmelCase : Optional[int] = {
'E': 12.70,
'T': 9.0_6,
'A': 8.1_7,
'O': 7.5_1,
'I': 6.9_7,
'N': 6.7_5,
'S': 6.3_3,
'H': 6.0_9,
'R': 5.9_9,
'D': 4.2_5,
'L': 4.0_3,
'C': 2.7_8,
'U': 2.7_6,
'M': 2.4_1,
'W': 2.3_6,
'F': 2.2_3,
'G': 2.0_2,
'Y': 1.9_7,
'P': 1.9_3,
'B': 1.2_9,
'V': 0.9_8,
'K': 0.7_7,
'J': 0.1_5,
'X': 0.1_5,
'Q': 0.1_0,
'Z': 0.0_7,
}
__lowerCAmelCase : Tuple = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__lowerCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __magic_name__ ( A : str ):
'''simple docstring'''
a = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __magic_name__ ( A : tuple ):
'''simple docstring'''
return x[0]
def __magic_name__ ( A : str ):
'''simple docstring'''
a = get_letter_count(A )
a = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(A )
a = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=A )
a = "".join(freq_to_letter[freq] )
a = list(freq_to_letter_str.items() )
freq_pairs.sort(key=A, reverse=A )
a = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A )
def __magic_name__ ( A : str ):
'''simple docstring'''
a = get_frequency_order(A )
a = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
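To see the scorer in action, here is a compact, self-contained variant: rank a text's letters by frequency and count how many of the six most and six least frequent agree with English's ETAOIN ordering (the tie-breaking here is simplified relative to the module above):

from collections import Counter

ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

def freq_order(text: str) -> str:
    counts = Counter(c for c in text.upper() if c.isalpha())
    # most frequent first; ties broken by English frequency rank
    return "".join(sorted(ETAOIN, key=lambda c: (-counts[c], ETAOIN.find(c))))

def match_score(text: str) -> int:
    order = freq_order(text)
    return sum(c in order[:6] for c in ETAOIN[:6]) + sum(c in order[-6:] for c in ETAOIN[-6:])

print(match_score("Sufficiently long English prose tends to score close to twelve."))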
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 0 |
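Cocktail shaker sort is bubble sort run alternately in both directions, and the swapped flag gives an O(n) best case on already-sorted input. A clean rendition with shrinking bounds, plus a spot-check against the built-in sort:

import random

def cocktail_shaker_sort(xs: list) -> list:
    start, end = 0, len(xs) - 1
    swapped = True
    while swapped:
        swapped = False
        for i in range(start, end):  # forward pass bubbles the max to the right
            if xs[i] > xs[i + 1]:
                xs[i], xs[i + 1] = xs[i + 1], xs[i]
                swapped = True
        end -= 1
        for i in range(end, start, -1):  # backward pass bubbles the min to the left
            if xs[i] < xs[i - 1]:
                xs[i], xs[i - 1] = xs[i - 1], xs[i]
                swapped = True
        start += 1
    return xs

for _ in range(100):  # spot-check against the built-in sort
    data = [random.randint(-50, 50) for _ in range(20)]
    assert cocktail_shaker_sort(data.copy()) == sorted(data)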
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = PegasusConfig
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Dict = """gelu"""
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Any=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : int=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=40 , __lowerCamelCase : str=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple=0 , ) -> List[str]:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = eos_token_id
a = pad_token_id
a = bos_token_id
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a = tf.concat([input_ids, eos_tensor] , axis=1 )
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a = prepare_pegasus_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Optional[Any]:
a = TFPegasusModel(config=__lowerCamelCase ).get_decoder()
a = inputs_dict["input_ids"]
a = input_ids[:1, :]
a = inputs_dict["attention_mask"][:1, :]
a = inputs_dict["head_mask"]
a = 1
# first forward pass
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
a , a = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids with them
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the new tokens to input_ids and attention_mask
a = tf.concat([input_ids, next_tokens] , axis=-1 )
a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a = output_from_no_past[:, -3:, random_slice_idx]
a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
def __magic_name__ ( A : List[str], A : int, A : Union[str, Any], A : List[str]=None, A : str=None, A : Optional[Any]=None, A : List[str]=None, A : int=None, ):
if attention_mask is None:
a = tf.cast(tf.math.not_equal(A, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : int = False
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
a = TFPegasusModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
SCREAMING_SNAKE_CASE_ : Tuple = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
SCREAMING_SNAKE_CASE_ : List[str] = """google/pegasus-xsum"""
@cached_property
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __UpperCAmelCase ( self : Any ) -> str:
a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> Optional[int]:
a = self.translate_src_text(**__lowerCamelCase )
assert self.expected_text == generated_words
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Tuple ) -> Dict:
a = self.tokenizer(self.src_text , **__lowerCamelCase , padding=__lowerCamelCase , return_tensors="tf" )
a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )
return generated_words
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
self._assert_generated_batch_equal_expected()
| 706 |
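Outside the test harness, the same checkpoint can be exercised directly. A minimal usage sketch (it downloads google/pegasus-xsum on first run):

from transformers import AutoTokenizer, TFPegasusForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")

batch = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    padding=True, truncation=True, return_tensors="tf",
)
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))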
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 0 |
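Since the metric above is a thin wrapper around NLTK's implementation, the same number can be reproduced without the datasets library:

from nltk.translate.gleu_score import corpus_gleu

hypotheses = ["the cat sat on the mat".split()]
list_of_references = [["the cat is on the mat".split()]]
print(round(corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4), 2))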
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCAmelCase : Optional[Any] = 3
def __magic_name__ ( A : int ):
'''simple docstring'''
print("Generating primitive root of p" )
while True:
a = random.randrange(3, A )
if pow(A, 2, A ) == 1:
continue
if pow(A, A, A ) == 1:
continue
return g
def __magic_name__ ( A : int ):
'''simple docstring'''
print("Generating prime p..." )
a = rabin_miller.generate_large_prime(A ) # select large prime number.
a = primitive_root(A ) # one primitive root on modulo p.
a = random.randrange(3, A ) # private_key -> have to be greater than 2 for safety.
a = cryptomath.find_mod_inverse(pow(A, A, A ), A )
a = (key_size, e_a, e_a, p)
a = (key_size, d)
return public_key, private_key
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
a , a = generate_key(A )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""", "w" ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""", "w" ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def __magic_name__ ( ):
'''simple docstring'''
print("Making key files..." )
make_key_files("elgamal", 2048 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
| 707 |
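For context, here is textbook ElGamal built from the same quantities (prime p, base g, private exponent d). Note that the module above stores the modular inverse of g**d in the public key, so its decryption step differs in sign; the parameters below are deliberately tiny and for illustration only:

import random

p, g = 1009, 11                    # toy prime modulus and base -- never this small in practice
d = random.randrange(3, p)         # private key
h = pow(g, d, p)                   # public key component

def encrypt(m: int) -> tuple:
    k = random.randrange(2, p - 1)  # fresh ephemeral key per message
    return pow(g, k, p), (m * pow(h, k, p)) % p

def decrypt(c1: int, c2: int) -> int:
    return (c2 * pow(c1, p - 1 - d, p)) % p  # c1**(p-1-d) is the inverse of c1**d mod p

message = 42
assert decrypt(*encrypt(message)) == message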
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 0 |
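The core trick in the script above is sorting whole multi-line blocks by a regex-extracted key. In miniature, with made-up block contents:

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

blocks = [
    '        ("zebra", "ZebraModel"),',
    '        ("albert", "AlbertModel"),',
    '        ("bert", "BertModel"),',
]
blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
print("\n".join(blocks))  # albert, bert, zebra order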
import warnings
from functools import wraps
from typing import Callable
def __magic_name__ ( A : Callable ):
'''simple docstring'''
@wraps(A )
def _inner_fn(*A : Any, **A : Optional[Any] ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future."""), A, )
return fn(*A, **A )
return _inner_fn
| 708 |
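Intended usage of the wrapper above, written out as a readable sketch of the same idea:

import warnings
from functools import wraps
from typing import Callable

def experimental(fn: Callable) -> Callable:
    """Warn on every call that `fn` may change without notice."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def new_feature(x: int) -> int:
    return x + 1

print(new_feature(1))  # emits a UserWarning, prints 2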
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 0 |
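The detokenization loop above buffers ordinary pieces and flushes them through sentencepiece whenever a special token is hit, so special tokens pass through verbatim. The same control flow with a toy decoder standing in for the sentencepiece model:

def tokens_to_string(tokens, special_tokens, sp_decode):
    out, buffered = "", []
    for tok in tokens:
        if tok in special_tokens:  # flush the buffer, keep the special token as-is
            out += sp_decode(buffered) + tok
            buffered = []
        else:
            buffered.append(tok)
    return (out + sp_decode(buffered)).strip()

# toy stand-in for sp_model.decode: join pieces, map the '▁' marker to a space
toy_decode = lambda pieces: "".join(pieces).replace("▁", " ")
print(tokens_to_string(["▁hello", "▁world", "</s>"], {"</s>"}, toy_decode))  # hello world</s>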
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 709 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from the need_index_manager mapping
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 0 |
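The safety check above reduces to: repeatedly find a process whose remaining need fits within the available vector, let it run, and reclaim its allocation. A compact vectorized sketch using the same tables, which confirms the safe state the module prints:

import numpy as np

def is_safe(claim, alloc, maximum) -> bool:
    alloc, maximum = np.array(alloc), np.array(maximum)
    need = maximum - alloc
    available = np.array(claim) - alloc.sum(axis=0)
    done = [False] * len(alloc)
    while not all(done):
        for i, finished in enumerate(done):
            if not finished and np.all(need[i] <= available):
                available = available + alloc[i]  # process i runs and frees its resources
                done[i] = True
                break
        else:
            return False  # no runnable process -> unsafe
    return True

print(is_safe([8, 5, 9, 7],
              [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
              [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]))  # True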
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
a = RobertaPreLayerNormConfig.from_pretrained(
A, architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
a = torch.load(hf_hub_download(repo_id=A, filename="pytorch_model.bin" ) )
a = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
a = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
a = tensor_value
a = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=A, config=A, state_dict=A )
model.save_pretrained(A )
# convert tokenizer
a = AutoTokenizer.from_pretrained(A )
tokenizer.save_pretrained(A )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : str = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 710 |
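The heart of the conversion is a mechanical rewrite of state-dict keys; the same transform in isolation:

def rename_keys(state_dict: dict) -> dict:
    out = {}
    for key, value in state_dict.items():
        if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
            continue  # unused weights are dropped
        if key.startswith("roberta."):
            key = "roberta_prelayernorm." + key[len("roberta."):]
        out[key] = value
    return out

print(rename_keys({"roberta.embeddings.word_embeddings.weight": 0, "lm_head.bias": 1}))
# {'roberta_prelayernorm.embeddings.word_embeddings.weight': 0, 'lm_head.bias': 1}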
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
| 662 | 0 |
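End to end, the public entry points dispatch to the map-style or iterable implementations above. A small usage example:

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)
print(mixed["text"])  # samples from both datasets until each has been fully seen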
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A : Tuple, A : List[str], A : int ):
'''simple docstring'''
a = LxmertConfig.from_json_file(A )
print(F"""Building PyTorch model from configuration: {config}""" )
a = LxmertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A, A, A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), A )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
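    # With the defaults above, a single sequence is serialized as
    # `[CLS] X [SEP]` and a pair as `[CLS] A [SEP] B [SEP]`, mirroring the
    # slow BigBird tokenizer.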
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
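# A minimal usage sketch for the fast tokenizer defined above (the class is
# shown here under a mangled name; this assumes the Hub checkpoints listed in
# the vocab map are reachable):
#
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   enc = tok("Hello world", return_tensors="pt")
#   tok.save_pretrained("./bigbird-tok")  # also copies spiece.model when available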
| 662 | 0 |
__lowerCAmelCase : int = 256
# Modulus to hash a string
__lowerCAmelCase : Dict = 100_0003
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
a = len(A )
a = len(A )
if p_len > t_len:
return False
a = 0
a = 0
a = 1
# Calculating the hash of pattern and substring of text
for i in range(A ):
a = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
a = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
a = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
a = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
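# The search above runs in O(p_len + t_len) on average: each window hash is
# derived from the previous one in O(1) via
#   new_hash = ((old_hash - ord(text[i]) * alphabet_size**(p_len - 1)) * alphabet_size
#               + ord(text[i + p_len])) % modulus
# and the O(p_len) string comparison only happens on a hash match. The worst
# case (many spurious collisions) degrades to O(p_len * t_len).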
def __magic_name__ ( ):
'''simple docstring'''
a = "abc1abc12"
a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
a = "alskfjaldsk23adsfabcabc"
assert rabin_karp(A, A ) and not rabin_karp(A, A )
# Test 2)
a = "ABABX"
a = "ABABZABABYABABX"
assert rabin_karp(A, A )
# Test 3)
a = "AAAB"
a = "ABAAAAAB"
assert rabin_karp(A, A )
# Test 4)
a = "abcdabcy"
a = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(A, A )
# Test 5)
a = "Lü"
a = "Lüsai"
assert rabin_karp(A, A )
a = "Lue"
assert not rabin_karp(A, A )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
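# A minimal sketch (not part of this script) of reading the shards back with
# tf.data, assuming the same two int64 feature keys serialized above and a
# shard filename such as the one this script produces:
#
#   feature_spec = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))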
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
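    # Worked example: with max_length=512, a batch whose concatenated stream
    # holds 1050 tokens keeps (1050 // 512) * 512 = 1024 tokens and yields two
    # fixed-length samples; the trailing 26 tokens are dropped.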
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBertTokenizer has no fast counterpart, so there is nothing extra to import here
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
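# The characters-per-token ratio computed during tokenization above is a quick
# tokenizer-efficiency signal: e.g. 120 characters mapped to 30 tokens gives a
# ratio of 4.0, and higher ratios mean the tokenizer compresses this corpus better.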
| 662 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] = 'true'
def _UpperCamelCase ( A : Dict, A : Tuple=82, A : List[Any]=16 ):
'''simple docstring'''
set_seed(42 )
a = RegressionModel()
a = deepcopy(A )
a = RegressionDataset(length=A )
a = DataLoader(A, batch_size=A )
model.to(accelerator.device )
a , a = accelerator.prepare(A, A )
return model, ddp_model, dataloader
def _UpperCamelCase ( A : Accelerator, A : Union[str, Any]=False ):
'''simple docstring'''
a = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
a = load_dataset("glue", "mrpc", split="validation" )
def tokenize_function(A : Tuple ):
a = tokenizer(examples["sentence1"], examples["sentence2"], truncation=A, max_length=A )
return outputs
with accelerator.main_process_first():
a = dataset.map(
A, batched=A, remove_columns=["idx", "sentence1", "sentence2"], )
a = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(A : List[Any] ):
if use_longest:
return tokenizer.pad(A, padding="longest", return_tensors="pt" )
return tokenizer.pad(A, padding="max_length", max_length=128, return_tensors="pt" )
return DataLoader(A, shuffle=A, collate_fn=A, batch_size=16 )
def _UpperCamelCase ( A : Dict, A : str ):
'''simple docstring'''
a = Accelerator(dispatch_batches=A, split_batches=A )
a = get_dataloader(A, not dispatch_batches )
a = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased", return_dict=A )
a , a = accelerator.prepare(A, A )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( A : List[Any], A : Optional[Any], A : str ):
'''simple docstring'''
a = []
for batch in dataloader:
a , a = batch.values()
with torch.no_grad():
a = model(A )
a , a = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a = [], []
for logit, targ in logits_and_targets:
logits.append(A )
targs.append(A )
a , a = torch.cat(A ), torch.cat(A )
return logits, targs
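# Note: `gather_for_metrics` (used above) gathers across processes and drops the
# samples that were duplicated to pad out the last batch, so the concatenated
# logits/targets line up exactly with the dataset length asserted below.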
def _UpperCamelCase ( A : Accelerator, A : Dict=82, A : Any=False, A : List[str]=False, A : List[str]=16 ):
'''simple docstring'''
a , a , a = get_basic_setup(A, A, A )
a , a = generate_predictions(A, A, A )
assert (
len(A ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A )}"""
def _UpperCamelCase ( A : bool = False, A : bool = False ):
'''simple docstring'''
a = evaluate.load("glue", "mrpc" )
a , a = get_mrpc_setup(A, A )
# First do baseline
a , a , a = setup["no"]
model.to(A )
model.eval()
for batch in dataloader:
batch.to(A )
with torch.inference_mode():
a = model(**A )
a = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A, references=batch["labels"] )
a = metric.compute()
# Then do distributed
a , a , a = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
a = model(**A )
a = outputs.logits.argmax(dim=-1 )
a = batch["labels"]
a , a = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A, references=A )
a = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _UpperCamelCase ( ):
'''simple docstring'''
a = Accelerator(split_batches=A, dispatch_batches=A )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(A, A )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a = Accelerator(split_batches=A, dispatch_batches=A )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(A, 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
a = Accelerator()
test_torch_metrics(A, 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( A : Dict ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = """perceiver"""
def __init__( self : Optional[Any] , __lowerCamelCase : Tuple=2_56 , __lowerCamelCase : int=12_80 , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[Any]=26 , __lowerCamelCase : Optional[int]=8 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : int=None , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]="kv" , __lowerCamelCase : Any=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=2_62 , __lowerCamelCase : Union[str, Any]=20_48 , __lowerCamelCase : Tuple=56 , __lowerCamelCase : Optional[Any]=[3_68, 4_96] , __lowerCamelCase : str=16 , __lowerCamelCase : int=19_20 , __lowerCamelCase : int=16 , __lowerCamelCase : Optional[Any]=[1, 16, 2_24, 2_24] , **__lowerCamelCase : Union[str, Any] , ) -> Optional[Any]:
super().__init__(**__lowerCamelCase )
a = num_latents
a = d_latents
a = d_model
a = num_blocks
a = num_self_attends_per_block
a = num_self_attention_heads
a = num_cross_attention_heads
a = qk_channels
a = v_channels
a = cross_attention_shape_for_attention
a = self_attention_widening_factor
a = cross_attention_widening_factor
a = hidden_act
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = use_query_residual
# masked language modeling attributes
a = vocab_size
a = max_position_embeddings
# image classification attributes
a = image_size
# flow attributes
a = train_size
# multimodal autoencoding attributes
a = num_frames
a = audio_samples_per_frame
a = samples_per_patch
a = output_shape
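    # Perceiver keeps compute independent of input size by cross-attending a
    # fixed latent array of shape (num_latents, d_latents) to the inputs; the
    # attributes above parameterize that bottleneck plus the per-modality heads.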
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a = {0: "batch", 1: "choice", 2: "sequence"}
else:
a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCAmelCase ( self : Tuple ) -> float:
return 1e-4
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a = preprocessor.num_special_tokens_to_add(__lowerCamelCase )
a = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a = [" ".join(["a"] ) * seq_length] * batch_size
a = dict(preprocessor(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
a = inputs.pop("input_ids" )
return inputs
elif isinstance(__lowerCamelCase , __lowerCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a = compute_effective_axis_dimension(__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
a = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
a = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBertTokenizer has no fast counterpart, so there is nothing extra to import here
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = tempfile.mkdtemp()
a = 5
# Realm tok
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = os.path.join(__lowerCamelCase , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __UpperCAmelCase ( self : str ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = RealmConfig(num_block_records=self.num_block_records )
return config
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __UpperCAmelCase ( self : int ) -> Any:
a = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__lowerCamelCase , )
return block_records
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
a = self.get_config()
a = self.get_dummy_retriever()
a = retriever.tokenizer
a = np.array([0, 3] , dtype="long" )
a = tokenizer(["Test question"] ).input_ids
a = tokenizer(
["the fourth"] , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ).input_ids
a = config.reader_seq_len
a , a , a , a = retriever(
__lowerCamelCase , __lowerCamelCase , answer_ids=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="np" )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(len(__lowerCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_config()
a = self.get_dummy_retriever()
a = retriever.tokenizer
a = np.array([0, 3, 5] , dtype="long" )
a = tokenizer(["Test question"] ).input_ids
a = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ).input_ids
a = config.reader_seq_len
a , a , a , a = retriever(
__lowerCamelCase , __lowerCamelCase , answer_ids=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="np" )
self.assertEqual([False, True, True] , __lowerCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCamelCase )
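        # The three lists checked above are, per retrieved block: a flag for
        # whether any answer span occurs in the concatenated input, and the
        # token-level start/end positions of those spans (-1 padding when absent).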
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
a = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
a = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
a = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = (1 - _cos) / 2
a = 1 - _cos
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
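# The coefficients above follow the standard biquad designs from the RBJ
# "Audio EQ Cookbook"; each filter realizes
#   H(z) = (b0 + b1*z^-1 + b2*z^-2) / (a0 + a1*z^-1 + a2*z^-2)
# with a0 = 1 + alpha, a1 = -2*cos(w0), a2 = 1 - alpha for the low-pass,
# high-pass, band-pass, and all-pass variants below.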
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = (1 + _cos) / 2
a = -1 - _cos
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = _sin / 2
a = 0
a = -ba
a = 1 + alpha
a = -2 * _cos
a = 1 - alpha
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float = 1 / sqrt(2 ) ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 1 - alpha
a = -2 * _cos
a = 1 + alpha
a = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = 1 + alpha * big_a
a = -2 * _cos
a = 1 - alpha * big_a
a = 1 + alpha / big_a
a = -2 * _cos
a = 1 - alpha / big_a
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = (big_a + 1) - (big_a - 1) * _cos
a = (big_a + 1) + (big_a - 1) * _cos
a = (big_a - 1) - (big_a + 1) * _cos
a = (big_a - 1) + (big_a + 1) * _cos
a = 2 * sqrt(A ) * alpha
a = big_a * (pmc + aaa)
a = 2 * big_a * mpc
a = big_a * (pmc - aaa)
a = ppmc + aaa
a = -2 * pmpc
a = ppmc - aaa
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __magic_name__ ( A : int, A : int, A : float, A : float = 1 / sqrt(2 ), ):
'''simple docstring'''
a = tau * frequency / samplerate
a = sin(A )
a = cos(A )
a = _sin / (2 * q_factor)
a = 10 ** (gain_db / 40)
a = (big_a + 1) - (big_a - 1) * _cos
a = (big_a + 1) + (big_a - 1) * _cos
a = (big_a - 1) - (big_a + 1) * _cos
a = (big_a - 1) + (big_a + 1) * _cos
a = 2 * sqrt(A ) * alpha
a = big_a * (ppmc + aaa)
a = -2 * big_a * pmpc
a = big_a * (ppmc - aaa)
a = pmc + aaa
a = 2 * mpc
a = pmc - aaa
a = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
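# A minimal usage sketch for the factory functions above (shown here under
# mangled names; upstream they are make_lowpass, make_highpass, etc.), assuming
# the IIRFilter class from audio_filters.iir_filter exposes a per-sample
# `process` method as in the surrounding repository:
#
#   filt = make_lowpass(frequency=1_000, samplerate=48_000)
#   filtered = [filt.process(sample) for sample in samples]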
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
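    # The helper above mirrors shortest-edge resizing: the smaller image side is
    # scaled to size["shortest_edge"] and the other side keeps the aspect ratio,
    # e.g. a 30x60 (w x h) image with shortest_edge=18 resizes to 18x36.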
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
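# Hedged illustration (assumed, simplified values -- not read from the fixture files
# above): the detection tests feed the image processor a COCO-style annotation dict of
# roughly this shape, with per-object boxes given as [x, y, width, height].
example_target = {
    "image_id": 39_769,
    "annotations": [
        {"id": 1, "image_id": 39_769, "category_id": 75, "bbox": [10.0, 20.0, 30.0, 40.0], "area": 1_200.0, "iscrowd": 0},
    ],
}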
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
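# A minimal, self-contained sketch (assumption: a simplified stand-in, not the real
# transformers._LazyModule) of the deferred-import pattern the module above relies on:
# attribute access resolves and caches the submodule symbol on first use.
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value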
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase : Tuple = logging.getLogger(__name__)
def load_and_quantize_model( model : torch.nn.Module, bnb_quantization_config : BnbQuantizationConfig, weights_location : Union[str, os.PathLike] = None, device_map : Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes : Optional[List[str]] = None, max_memory : Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder : Optional[Union[str, os.PathLike]] = None, offload_state_dict : bool = False, ):
'''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        torch_dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "" ).replace(".bias", "" )
                    param = getattr(model, name, None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(torch_dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder )
def get_quantized_model_device_map( model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None ):
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(device_map, str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs, )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs )
    if isinstance(device_map, dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None ):
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ):
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module, nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model, name, bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict ):
        tied_keys = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params, [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix" ):
        is_base_model = not hasattr(model, model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit ):
return True
return False
def get_parameter_device( parameter : nn.Module ):
'''simple docstring'''
return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ):
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split("." )
            for split in splits[:-1]:
                new_module = getattr(module, split )
                if new_module is None:
                    raise ValueError(F"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index )
        if hasattr(module._parameters[tensor_name], "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB" ), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index )
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB" ), offload_folder, index=offload_index )
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size() ) )
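# Minimal sketch (assumption: an illustrative stand-in, not accelerate's API) of the
# recursive "replace every nn.Linear unless excluded" traversal performed by
# _replace_with_bnb_layers above; an identity replacement is used so the snippet runs
# without bitsandbytes or a GPU.
import torch.nn as nn
def replace_linears(module, make_replacement, skip=()):
    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            setattr(module, name, make_replacement(child))
        else:
            replace_linears(child, make_replacement, skip)
toy = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
replace_linears(toy, lambda lin: nn.Linear(lin.in_features, lin.out_features, lin.bias is not None))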
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps : jnp.ndarray, embedding_dim : int, freq_shift : float = 1, min_timescale : float = 1, max_timescale : float = 1.0E4, flip_sin_to_cos : bool = False, scale : float = 1.0, ):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps, 1 ) * jnp.expand_dims(inv_timescales, 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )], axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )], axis=1 )
    signal = jnp.reshape(signal, [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
    time_embed_dim : int = 32
    dtype : jnp.dtype = jnp.float32
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
    dim : int = 32
    flip_sin_to_cos : bool = False
    freq_shift : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 662 | 0 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping( fname : str, overwrite : bool = False ):
    '''simple docstring'''
    with open(fname, "r", encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite : bool = False ):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames, diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
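# Hedged demonstration (assumed sample lines, not taken from a real auto module):
# _re_intro_mapping flags a mapping header, _re_identifier extracts the sort key.
assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
assert _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0] == "albert"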
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
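# Hedged, standalone mirror (illustrative sizes only) of the prepare_image_inputs
# helper above: build one random RGB PIL image from a channels-first array.
import numpy as np
from PIL import Image
arr = np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8)
img = Image.fromarray(np.moveaxis(arr, 0, -1))
assert img.size == (4_00, 30)  # PIL reports (width, height)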
| 662 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=99 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[Any]=5_12 , __lowerCamelCase : int=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=None , ) -> Any:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str ) -> Optional[Any]:
a = LlamaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ) -> Tuple:
a = True
a = LlamaModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ) -> str:
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , ) -> Optional[Any]:
a = True
a = True
a = LlamaForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# first forward pass
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
a = LlamaModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "single_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "multi_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(__lowerCamelCase )
a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a = LlamaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __UpperCAmelCase ( self : Dict ) -> Any:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ids_tensor([1, 10] , config.vocab_size )
a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = LlamaModel(__lowerCamelCase )
original_model.to(__lowerCamelCase )
original_model.eval()
a = original_model(__lowerCamelCase ).last_hidden_state
a = original_model(__lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
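        # rope scaling settings: the scaling strategy under test plus its factor (attached to the model config as `rope_scaling`)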
a = {"type": scaling_type, "factor": 10.0}
a = LlamaModel(__lowerCamelCase )
scaled_model.to(__lowerCamelCase )
scaled_model.eval()
a = scaled_model(__lowerCamelCase ).last_hidden_state
a = scaled_model(__lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-5 ) )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
# Expected mean on dim = -1
a = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a = model(torch.tensor(__lowerCamelCase ) )
a = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , __lowerCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
a = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __lowerCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Model is curently gated" )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a = "Simply put, the theory of relativity states that "
a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a = tokenizer.encode(__lowerCamelCase , return_tensors="pt" )
a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__lowerCamelCase )
# greedy generation outputs
a = model.generate(__lowerCamelCase , max_new_tokens=64 , top_p=__lowerCamelCase , temperature=1 , do_sample=__lowerCamelCase )
a = tokenizer.decode(generated_ids[0] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ):
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(R"^(.*)_\d+\.jpg$", stem ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def training_function( config, args ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir", required=True, help="The data folder on disk." )
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args )
if __name__ == "__main__":
main()
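# Hedged mini-check (assumed numbers) of the resume arithmetic used above: a state
# saved as "step_500" with 150 batches per epoch resumes at epoch 3, batch 50.
resume_step_demo = 500
batches_per_epoch_demo = 150
starting_epoch_demo = resume_step_demo // batches_per_epoch_demo
resume_step_demo -= starting_epoch_demo * batches_per_epoch_demo
assert (starting_epoch_demo, resume_step_demo) == (3, 50)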
| 662 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
def convert_classification( base_model_name, hf_config, downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization( base_model_name, hf_config, downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector( base_model_name, hf_config, downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name, config_path, checkpoint_path, model_dump_path ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
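# A hypothetical end-to-end run of the converter above. The paths and the base model name
# are placeholder assumptions, not from the source:
#
#   python convert_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model
#
# The `architectures` entry in the loaded config selects which of the three converters
# defined above (sequence classification, frame classification, or x-vector) is applied.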
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
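# Illustration of the filtering logic with a made-up diff (not from the source): invoked as
# `python get_modified_files.py utils src`, a diff listing utils/check.py, docs/readme.md
# and src/app.py yields the regex ^(utils|src).*?\.py$ and prints "utils/check.py src/app.py"
# with no trailing newline, ready for consumption by a Makefile target.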
| 662 | 0 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
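# Why searching below 10**6 suffices (hand-checked arithmetic): 9**5 = 59_049, so a number
# with d digits has a digit fifth-power sum of at most d * 59_049. For d = 7 that bound is
# 413_343, a six-digit number, so no number with seven or more digits can equal the sum of
# the fifth powers of its digits. (6 * 59_049 = 354_294 would give an even tighter bound.)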
| 701 |
def _modexpt( base : int, exponent : int, modulo_value : int ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
x = _modexpt(base, exponent // 2, modulo_value ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(base, exponent - 1, modulo_value )) % modulo_value
def solution( base : int = 1777, height : int = 1855, digits : int = 8 ) -> int:
'''simple docstring'''
result = base
for _ in range(1, height ):
result = _modexpt(base, result, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
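# Sanity-check sketch (hand-checked, not from the source): _modexpt(3, 4, 10**3) returns 81,
# since 3**4 = 81 < 1000. solution() builds the power tower base**base**...**base of the
# given height one level at a time, keeping only the last `digits` digits via the modular
# exponentiation above.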
| 662 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCAmelCase : Any = 25_0004
__lowerCAmelCase : Optional[Any] = 25_0020
@require_sentencepiece
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = MBartaaTokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : int = True
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
a = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : int ) -> Any:
a = "<s>"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCamelCase ) , 10_54 )
def __UpperCAmelCase ( self : str ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> int:
# fmt: off
a = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=True
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
# Save tokenizer rust, legacy_format=False
a = tempfile.mkdtemp()
a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase )
a = tokenizer_p.save_pretrained(__lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a = tokenizer_r.from_pretrained(__lowerCamelCase )
a = tokenizer_p.from_pretrained(__lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) )
shutil.rmtree(__lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = """facebook/mbart-large-50-one-to-many-mmt"""
SCREAMING_SNAKE_CASE_ : Any = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
SCREAMING_SNAKE_CASE_ : Any = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __UpperCAmelCase ( cls : Optional[int] ) -> str:
a = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
a = 1
return cls
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids )
a = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
a = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
a = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __lowerCamelCase )
a = 10
a = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __lowerCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = tempfile.mkdtemp()
a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase )
a = MBartaaTokenizer.from_pretrained(__lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" )
a = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
a = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCAmelCase ( self : List[str] ) -> str:
a = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" )
a = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors="pt" )
a = targets["input_ids"]
a = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
a = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[25_00_04, 62, 30_34, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
| 702 |
def jaro_winkler( stra : str, strb : str ) -> float:
'''simple docstring'''
def get_matched_characters(_stra : str, _strb : str ) -> str:
matched = []
limit = min(len(_stra ), len(_strb ) ) // 2
for i, l in enumerate(_stra ):
left = int(max(0, i - limit ) )
right = int(min(i + limit + 1, len(_strb ) ) )
if l in _strb[left:right]:
matched.append(l )
_strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
return "".join(matched )
# matching characters
matching_a = get_matched_characters(stra, strb )
matching_b = get_matched_characters(strb, stra )
match_count = len(matching_a )
# transposition
transpositions = (
len([(ca, cb) for ca, cb in zip(matching_a, matching_b ) if ca != cb] ) // 2
)
if not match_count:
jaro = 0.0
else:
jaro = (
1
/ 3
* (
match_count / len(stra )
+ match_count / len(strb )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
prefix_len = 0
for ca, cb in zip(stra[:4], strb[:4] ):
if ca == cb:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
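# Formula recap for the implementation above (standard Jaro-Winkler): with m matching
# characters and t transpositions, jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3, and the
# Winkler bonus adds 0.1 * p * (1 - jaro) where p <= 4 is the common-prefix length.
# For "hello"/"world" only the in-window "l" matches (m=1, t=0, p=0), so the score is
# (1/5 + 1/5 + 1) / 3 = 0.4666...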
| 662 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , parent : Optional[Any] , ) -> Optional[Any]:
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = False
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 5_12
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def __UpperCAmelCase ( self : int ) -> Any:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any:
a = TFDistilBertModel(config=__lowerCamelCase )
a = {"input_ids": input_ids, "attention_mask": input_mask}
a = model(__lowerCamelCase )
a = [input_ids, input_mask]
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> Any:
a = TFDistilBertForMaskedLM(config=__lowerCamelCase )
a = {"input_ids": input_ids, "attention_mask": input_mask}
a = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = TFDistilBertForQuestionAnswering(config=__lowerCamelCase )
a = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
a = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Tuple:
a = self.num_labels
a = TFDistilBertForSequenceClassification(__lowerCamelCase )
a = {"input_ids": input_ids, "attention_mask": input_mask}
a = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Dict:
a = self.num_choices
a = TFDistilBertForMultipleChoice(__lowerCamelCase )
a = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
a = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
a = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> Dict:
a = self.num_labels
a = TFDistilBertForTokenClassification(__lowerCamelCase )
a = {"input_ids": input_ids, "attention_mask": input_mask}
a = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a = TFDistilBertModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , dim=37 )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ) -> Any:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
a = TFDistilBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> int:
a = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
a = tf.constant([[0, 1, 2, 3, 4, 5]] )
a = model(__lowerCamelCase )[0]
a = [1, 6, 7_68]
self.assertEqual(output.shape , __lowerCamelCase )
a = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 662 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : Tuple, mobilebert_config_file : Any, pytorch_dump_path : Union[str, Any] ):
'''simple docstring'''
config = MobileBertConfig.from_json_file(mobilebert_config_file )
print(F"""Building PyTorch model from configuration: {config}""" )
model = MobileBertForPreTraining(config )
# Load weights from tf checkpoint
model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
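# A hypothetical run of the converter above; all three paths are placeholders, not from
# the source:
#
#   python convert_mobilebert.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch_model.bin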
| 704 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , parent : Any , batch_size : Any=7 , num_channels : List[Any]=3 , min_resolution : int=30 , max_resolution : int=4_00 , do_resize : Dict=True , size : Tuple=None , do_normalize : Optional[int]=True , image_mean : Optional[Any]=[0.5, 0.5, 0.5] , image_std : Optional[Any]=[0.5, 0.5, 0.5] , do_rescale : Dict=True , rescale_factor : List[str]=1 / 2_55 , do_pad : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values( self : Union[str, Any] , image_inputs : Any , batched : bool=False ) -> List[str]:
if not batched:
image = image_inputs[0]
if isinstance(image , Image.Image ):
w , h = image.size
else:
h , w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w )
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h )
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height , expected_width = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
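# Worked example of the shortest-edge rule above (hand-computed, not from the source):
# for a 300 (h) x 500 (w) image with shortest_edge=18, w > h, so expected_height = 18 and
# expected_width = int(18 * 500 / 300) = 30; the aspect ratio is preserved and the shorter
# side lands exactly on shortest_edge.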
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , feature_size : int , sampling_rate : int , padding_value : float , **kwargs : Any ) -> int:
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.padding_side = kwargs.pop("padding_side" , "right" )
self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
super().__init__(**kwargs )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
a = processed_features[self.model_input_names[0]]
a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__lowerCamelCase ) == 0:
if return_attention_mask:
a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a = required_input[0]
if isinstance(__lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__lowerCamelCase ):
a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__lowerCamelCase ):
a = "tf"
elif is_torch_tensor(__lowerCamelCase ):
a = "pt"
elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
a = "np"
else:
raise ValueError(
f"""type of {first_element} unknown: {type(__lowerCamelCase )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a = to_numpy(__lowerCamelCase )
else:
a = [to_numpy(__lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
a = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase )
a = processed_features[self.model_input_names[0]]
a = len(__lowerCamelCase )
if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
a = []
for i in range(__lowerCamelCase ):
a = {k: v[i] for k, v in processed_features.items()}
# truncation
a = self._truncate(
__lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , )
truncated_inputs.append(__lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a = PaddingStrategy.MAX_LENGTH
a = {}
for i in range(__lowerCamelCase ):
# padding
a = self._pad(
truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
a = []
if value.dtype is np.dtype(np.floataa ):
a = value.astype(np.floataa )
batch_outputs[key].append(__lowerCamelCase )
return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict:
a = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a = len(__lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
a = np.ones(len(__lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
a = max_length - len(__lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
a = np.pad(
processed_features["attention_mask"] , (0, difference) )
a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a = np.pad(
__lowerCamelCase , __lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a = np.pad(
processed_features["attention_mask"] , (difference, 0) )
a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a = np.pad(
__lowerCamelCase , __lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> List[str]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
a = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a = len(__lowerCamelCase ) > max_length
if needs_to_be_truncated:
a = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a = processed_features["attention_mask"][:max_length]
return processed_features
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str]=False , __lowerCamelCase : int=None ) -> int:
# Get padding strategy
if padding is not False:
if padding is True:
a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__lowerCamelCase , __lowerCamelCase ):
a = PaddingStrategy(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
a = padding
else:
a = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
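# Worked padding example for the class above (input values made up, arithmetic
# hand-checked): with feature_size=1, padding_value=0.0 and padding_side="right", the
# ragged batch [[0.1, 0.2, 0.3], [0.4]] padded to the longest sequence becomes
# [[0.1, 0.2, 0.3], [0.4, 0.0, 0.0]] with attention_mask [[1, 1, 1], [1, 0, 0]]. With
# pad_to_multiple_of=4 the target length is ((3 // 4) + 1) * 4 = 4, so both rows are
# padded to four values instead.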
| 705 |
def cocktail_shaker_sort( unsorted : list ) -> list:
'''simple docstring'''
for i in range(len(unsorted ) - 1, 0, -1 ):
swapped = False
for j in range(i, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
swapped = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
swapped = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
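# Short trace (hand-checked): for [4, 1, 3, 2], the first backward pass bubbles the minimum
# to the front ([1, 4, 2, 3]) and the following forward pass pushes the maximum to the back
# ([1, 2, 3, 4]); the second sweep performs no swaps, so the early exit fires after two
# outer iterations.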
| 662 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = TransfoXLTokenizer
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
super().setUp()
a = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Any ) -> Union[str, Any]:
a = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple ) -> List[Any]:
a = "<unk> UNwanted , running"
a = "<unk> unwanted, running"
return input_text, output_text
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
a = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCamelCase )
a = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(__lowerCamelCase , ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [0, 4, 8, 7] )
def __UpperCAmelCase ( self : Dict ) -> int:
a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
a = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
a = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Dict:
a = self.get_tokenizer()
a = len(__lowerCamelCase )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowerCamelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 706 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
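# Illustration only: a minimal pure-Python sketch of the sentence-level GLEU
# computation the docstring above describes (the minimum of n-gram precision
# and recall over orders 1..4). The metric itself delegates to
# nltk.translate.gleu_score.corpus_gleu; the names below are local to this
# sketch and not part of that API.
from collections import Counter


def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    ref = _ngram_counts(reference, min_len, max_len)
    matches = sum(min(count, ref[ngram]) for ngram, count in hyp.items())
    # min(precision, recall) reduces to matches over the larger n-gram total
    return matches / max(sum(hyp.values()), sum(ref.values()), 1)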
| 662 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[int]=18 , __lowerCamelCase : Dict=30 , __lowerCamelCase : Dict=4_00 , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : int=False , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , ) -> Dict:
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size if size is not None else {"height": 18, "width": 20}
a = do_thumbnail
a = do_align_axis
a = do_pad
a = do_normalize
a = image_mean
a = image_std
def __UpperCAmelCase ( self : Any ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = DonutImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
a = DonutImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_thumbnail" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_align_long_axis" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 20} )
a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
# Previous config had dimensions in (width, height) order
a = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"height": 84, "width": 42} )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@is_flaky()
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
@is_flaky()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 707 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
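# Illustration (not part of the script): sort_auto_mapping rewrites blocks like
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
# so that entries are ordered by the quoted identifier captured by
# _re_identifier, i.e. the ("albert", ...) line moves above ("bert", ...).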
| 662 | 0 |
def __magic_name__ ( A : int, A : int ):
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(A, int(b / 2 ) ) * actual_power(A, int(b / 2 ) )
else:
return a * actual_power(A, int(b / 2 ) ) * actual_power(A, int(b / 2 ) )
def __magic_name__ ( A : int, A : int ):
'''simple docstring'''
if b < 0:
return 1 / actual_power(A, A )
return actual_power(A, A )
if __name__ == "__main__":
print(power(-2, -3))
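# The parameter names above were scrambled in preprocessing; a readable,
# runnable sketch of the assumed divide-and-conquer exponentiation, with the
# half power reused instead of being recomputed twice:
def fast_power(base: int, exponent: int):
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)
    return half * half if exponent % 2 == 0 else base * half * half


assert fast_power(-2, -3) == -0.125
assert fast_power(3, 4) == 81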
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
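# Context note (a sketch, not part of the class): the __getstate__/__setstate__
# pair exists because the SentencePiece processor is not picklable, so pickling
# drops it and unpickling reloads it from vocab_file:
# import pickle
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# tok2 = pickle.loads(pickle.dumps(tok))  # sp_model is rebuilt in __setstate__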
| 662 | 0 |
def __magic_name__ ( A : int = 50 ):
'''simple docstring'''
a = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
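# Sanity sketch (an assumed reading of the solution above): counting rows made
# of unit squares plus tiles of length 2-4 satisfies the linear recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), which the triple loop reproduces
# by conditioning on the position and length of the first tile placed.
def tiling_ways(length: int) -> int:
    ways = [1, 1, 2, 4]  # f(0)..f(3); the all-squares row counts as one way
    for _ in range(4, length + 1):
        ways.append(sum(ways[-4:]))
    return ways[length]


assert tiling_ways(5) == 15  # the worked example from Project Euler 117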
| 709 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
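# A compact, self-contained sketch of the safety check implemented above
# (names here are illustrative; the original class and method names were
# rewritten by preprocessing):
def is_safe(available, need, allocation):
    available = list(available)
    pending = list(range(len(need)))
    while pending:
        runnable = [
            p for p in pending if all(n <= a for n, a in zip(need[p], available))
        ]
        if not runnable:
            return False  # no process can finish with what is free -> unsafe
        first = runnable[0]
        # a finished process releases everything it was holding
        available = [a + r for a, r in zip(available, allocation[first])]
        pending.remove(first)
    return True


assert is_safe([3, 3], [[1, 2], [4, 1]], [[1, 0], [0, 2]])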
| 662 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
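# Usage sketch mirroring the public `datasets` API these helpers back (the
# call shape is standard, but treat the values as illustrative):
# from datasets import Dataset, interleave_datasets
# ds1 = Dataset.from_dict({"text": ["a", "b"]})
# ds2 = Dataset.from_dict({"text": ["c", "d"]})
# mixed = interleave_datasets(
#     [ds1, ds2], probabilities=[0.7, 0.3], seed=42,
#     stopping_strategy="all_exhausted",
# )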
| 662 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = """gpt_neox"""
def __init__( self : Dict , __lowerCamelCase : Dict=5_04_32 , __lowerCamelCase : Any=61_44 , __lowerCamelCase : Dict=44 , __lowerCamelCase : List[str]=64 , __lowerCamelCase : List[str]=2_45_76 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : str=0.25 , __lowerCamelCase : int=1_00_00 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=20_48 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : str=1e-5 , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=0 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Dict , ) -> Optional[Any]:
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = rotary_pct
a = rotary_emb_base
a = attention_dropout
a = hidden_dropout
a = classifier_dropout
a = initializer_range
a = layer_norm_eps
a = use_cache
a = tie_word_embeddings
a = use_parallel_residual
a = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
a = self.rope_scaling.get("type" , __lowerCamelCase )
a = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
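# For reference, the special-token layouts the three methods above produce:
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over the first segment, 1s over the second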
| 662 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
__lowerCAmelCase : Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowerCAmelCase : Tuple = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __magic_name__ ( A : list[list[int]] ):
'''simple docstring'''
a = []
for i in range(len(A ) ):
a = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
a = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
a = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A )
return next_generation
def __magic_name__ ( A : list[list[int]], A : int ):
'''simple docstring'''
a = []
for _ in range(A ):
# Create output image
a = Image.new("RGB", (len(cells[0] ), len(A )) )
a = img.load()
# Save cells to image
for x in range(len(A ) ):
for y in range(len(cells[0] ) ):
a = 255 - cells[y][x] * 255
a = (colour, colour, colour)
# Save image
images.append(A )
a = new_generation(A )
return images
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
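# Standalone sanity check of the rules described in the comments above, using
# a tiny step function equivalent to the (renamed) generation logic:
def step(cells):
    height, width = len(cells), len(cells[0])
    nxt = [[0] * width for _ in range(height)]
    for i in range(height):
        for j in range(width):
            alive_neighbours = sum(
                cells[i + di][j + dj]
                for di in (-1, 0, 1)
                for dj in (-1, 0, 1)
                if (di or dj) and 0 <= i + di < height and 0 <= j + dj < width
            )
            nxt[i][j] = int(alive_neighbours == 3 or (cells[i][j] == 1 and alive_neighbours == 2))
    return nxt


blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert step(step(blinker)) == blinker  # the blinker oscillates with period 2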
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
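# Reading a shard back (a sketch: the feature names match the serialization
# above, but the file path is hypothetical):
# import tensorflow as tf
# spec = {
#     "input_ids": tf.io.VarLenFeature(tf.int64),
#     "attention_mask": tf.io.VarLenFeature(tf.int64),
# }
# ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
# ds = ds.map(lambda record: tf.io.parse_single_example(record, spec))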
| 662 | 0 |
def __magic_name__ ( A : list, A : list ):
'''simple docstring'''
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A, A ) ) )
def __magic_name__ ( A : list[float] ):
'''simple docstring'''
if point:
if isinstance(A, A ):
for item in point:
if not isinstance(A, (int, float) ):
a = (
"Expected a list of numbers as input, found "
F"""{type(A ).__name__}"""
)
raise TypeError(A )
else:
a = F"""Expected a list of numbers as input, found {type(A ).__name__}"""
raise TypeError(A )
else:
raise ValueError("Missing an input" )
def __magic_name__ ( A : list, A : list ):
'''simple docstring'''
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A, A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
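# Usage note: both distance helpers above compute the same L1 metric, e.g. the
# pair ([1, 1], [9, 9]) yields 16.0. The validator rejects non-numeric entries
# with a TypeError and an empty point with a ValueError.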
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
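# Note on the ratio computed inside tokenize(): it is characters per token
# (e.g. a 1200-character file producing 400 input ids gives 3.0), a rough
# proxy for how efficiently the tokenizer compresses the source; the upstream
# script stores it on the example as `ratio_char_token`.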
| 662 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : torch.FloatTensor
class snake_case__ (_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , __lowerCamelCase : int = 6_55_36 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 0 , __lowerCamelCase : str = "fourier" , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __lowerCamelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __lowerCamelCase : Tuple[str] = "UNetMidBlock1D" , __lowerCamelCase : str = None , __lowerCamelCase : Tuple[int] = (32, 32, 64) , __lowerCamelCase : str = None , __lowerCamelCase : int = 8 , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = False , ) -> List[str]:
super().__init__()
a = sample_size
# time
if time_embedding_type == "fourier":
a = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__lowerCamelCase , log=__lowerCamelCase , flip_sin_to_cos=__lowerCamelCase )
a = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
a = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__lowerCamelCase , downscale_freq_shift=__lowerCamelCase )
a = block_out_channels[0]
if use_timestep_embedding:
a = block_out_channels[0] * 4
a = TimestepEmbedding(
in_channels=__lowerCamelCase , time_embed_dim=__lowerCamelCase , act_fn=__lowerCamelCase , out_dim=block_out_channels[0] , )
a = nn.ModuleList([] )
a = None
a = nn.ModuleList([] )
a = None
# down
a = in_channels
for i, down_block_type in enumerate(__lowerCamelCase ):
a = output_channel
a = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
a = i == len(__lowerCamelCase ) - 1
a = get_down_block(
__lowerCamelCase , num_layers=__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__lowerCamelCase )
# mid
a = get_mid_block(
__lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__lowerCamelCase , add_downsample=__lowerCamelCase , )
# up
a = list(reversed(__lowerCamelCase ) )
a = reversed_block_out_channels[0]
if out_block_type is None:
a = out_channels
else:
a = block_out_channels[0]
for i, up_block_type in enumerate(__lowerCamelCase ):
a = output_channel
a = (
reversed_block_out_channels[i + 1] if i < len(__lowerCamelCase ) - 1 else final_upsample_channels
)
a = i == len(__lowerCamelCase ) - 1
a = get_up_block(
__lowerCamelCase , num_layers=__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__lowerCamelCase )
a = output_channel
# out
a = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
a = get_out_block(
out_block_type=__lowerCamelCase , num_groups_out=__lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=__lowerCamelCase , act_fn=__lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Union[torch.Tensor, float, int] , __lowerCamelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
a = timestep
if not torch.is_tensor(__lowerCamelCase ):
a = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__lowerCamelCase ) and len(timesteps.shape ) == 0:
a = timesteps[None].to(sample.device )
a = self.time_proj(__lowerCamelCase )
if self.config.use_timestep_embedding:
a = self.time_mlp(__lowerCamelCase )
else:
a = timestep_embed[..., None]
a = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
a = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
a = ()
for downsample_block in self.down_blocks:
a , a = downsample_block(hidden_states=__lowerCamelCase , temb=__lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
a = self.mid_block(__lowerCamelCase , __lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
a = down_block_res_samples[-1:]
a = down_block_res_samples[:-1]
a = upsample_block(__lowerCamelCase , res_hidden_states_tuple=__lowerCamelCase , temb=__lowerCamelCase )
# 5. post-process
if self.out_block:
a = self.out_block(__lowerCamelCase , __lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__lowerCamelCase )
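# Shape-level usage sketch (the upstream class name is UNet1DModel; treat the
# numbers as illustrative):
# import torch
# model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# noise = torch.randn(1, 2, 65536)
# out = model(noise, timestep=10).sample  # same (batch, channels, length) shape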
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
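# What the re.split helper above does, illustrated: splitting with a capturing
# group keeps the delimiters, so dropping the last two pieces trims everything
# from the final stop sequence onward (the names below are local to this
# example, not part of the script):
import re

eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
completion = "    return x * 2\n\ndef next_function():\n    pass"
pieces = re.split("(%s)" % "|".join(eof_strings), completion)
assert "".join(pieces[:-2]) == "    return x * 2\n"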
| 662 | 0 |
'''simple docstring'''
def __magic_name__ ( A : list[int], A : str ):
'''simple docstring'''
a = int(A )
# Initialize Result
a = []
# Traverse through all denomination
for denomination in reversed(A ):
# Find denominations
while int(A ) >= int(A ):
total_value -= int(A )
answer.append(A ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
__lowerCAmelCase : List[str] = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__lowerCAmelCase : int = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
__lowerCAmelCase : Dict = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__lowerCAmelCase : str = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
__lowerCAmelCase : int = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
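# Caveat worth noting: the routine above is greedy, which is optimal for
# canonical coin systems such as Indian currency but not in general; with
# denominations [1, 3, 4] and a value of 6 it returns [4, 1, 1] (three coins)
# even though [3, 3] (two coins) is optimal.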
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : str = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
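        # minimal BPE merge rules consistent with the toy vocab above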
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
"""simple docstring"""
    def __lt__( self , other ) -> bool:
        return self[-1] < other[-1]

    def __eq__( self , other ) -> bool:
        return self[-1] == other[-1]
def patience_sort( A : list ) -> list:
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort the elements into piles (stacks): each element is appended to the
    # leftmost stack whose top is >= the element, otherwise it starts a new stack
    for element in A:
        new_stack = Stack([element] )
        i = bisect_left(stacks, new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge the sorted stacks efficiently
    A[:] = merge(*(reversed(stack ) for stack in stacks) )
    return A
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
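# Example: entering "5,1,4,2,3" prints [1, 2, 3, 4, 5]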
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
from math import factorial, pi
def maclaurin_sin( A : float, accuracy : int = 30 ) -> float:
    '''simple docstring'''
    if not isinstance(A, (int, float) ):
        raise ValueError("maclaurin_sin() requires either an int or float for theta" )
    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
    theta = float(A )
    # reduce theta to [0, 2*pi) so the Maclaurin series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( A : float, accuracy : int = 30 ) -> float:
    '''simple docstring'''
    if not isinstance(A, (int, float) ):
        raise ValueError("maclaurin_cos() requires either an int or float for theta" )
    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
    theta = float(A )
    # reduce theta to [0, 2*pi) so the Maclaurin series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
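# Sanity check: with the default 30 terms, maclaurin_sin(10) ≈ math.sin(10) ≈ -0.5440211108893698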
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps : jnp.ndarray, embedding_dim : int, freq_shift : float = 1, min_timescale : float = 1, max_timescale : float = 1.0E4, flip_sin_to_cos : bool = False, scale : float = 1.0, ):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps, 1 ) * jnp.expand_dims(inv_timescales, 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )], axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )], axis=1 )
    signal = jnp.reshape(signal, [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
    time_embed_dim : int = 32
    dtype : jnp.dtype = jnp.float32
@nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(temb )
        return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
    dim : int = 32
    flip_sin_to_cos : bool = False
    freq_shift : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
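# Minimal usage sketch (hypothetical values): for t = jnp.array([0, 10, 20]),
# get_sinusoidal_embeddings(t, embedding_dim=32) returns an array of shape (3, 32).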
| 662 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset ):
"""simple docstring"""
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria ):
"""simple docstring"""
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( A : str ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ), A )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
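    # decode each generation and truncate it at the first end-of-function marker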
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def main():
'''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
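    # each prompt is queued n_copies times; with batch_size return sequences per pass,
    # this yields n_samples completions per task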
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
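        # build a single random 30x400 RGB image; np.moveaxis turns channels-first into channels-last for PIL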
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 662 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
    '''simple docstring'''
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        # the first request returned at most 100 jobs; page through the remainder
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""", headers=headers ).json()
            job_time.update({job["name"]: extract_time_from_single_job(job ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F'''{k}: {v['duration']}''')
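# Usage sketch (hypothetical run id and filename):
#   python this_script.py --workflow_run_id 1234567890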
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ):
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(R"^(.*)_\d+\.jpg$", stem ).groups()[0]
class PetsDataset(Dataset ):
"""simple docstring"""
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert("RGB" )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config , args ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
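            # resume_step is now an offset within the starting epoch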
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
    args = parser.parse_args()
    config = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args )
if __name__ == "__main__":
main()
| 662 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Any ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Tuple , **__lowerCamelCase : str ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : str ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : str , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *__lowerCamelCase : Any , **__lowerCamelCase : Any ) -> List[str]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : int , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ) -> Optional[int]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Dict:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class snake_case__ (metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : List[Any] ) -> Any:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __UpperCAmelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def __UpperCAmelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def __UpperCAmelCase ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 701 |
def _modexpt( base : int, exponent : int, modulo_value : int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value )) % modulo_value


def solution( base : int = 1777, height : int = 1855, digits : int = 8 ) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height ):
        result = _modexpt(base, result, 10**digits )
    return result


if __name__ == "__main__":
    print(F'''{solution() = }''')
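# With the defaults this evaluates the last 8 digits of the hyperexponentiation
# (tetration) 1777↑↑1855, as in Project Euler problem 188.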
| 662 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : int ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 702 |
def jaro_winkler( str1 : str, str2 : str ) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1 : str, _str2 : str ) -> str:
        matched = []
        limit = min(len(_str1 ), len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            left = int(max(0, i - limit ) )
            right = int(min(i + limit + 1, len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                _str2 = F"""{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}"""
        return "".join(matched )

    # matching characters
    matching_1 = get_matched_characters(str1, str2 )
    matching_2 = get_matched_characters(str2, str1 )
    match_count = len(matching_1 )

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1, matching_2 ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str1[:4], str2[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
| 662 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : List[Any] = logging.get_logger(__name__)
__magic_name__ : str = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """ctrl"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , __lowerCamelCase : int=24_65_34 , __lowerCamelCase : Optional[Any]=2_56 , __lowerCamelCase : List[str]=12_80 , __lowerCamelCase : List[str]=81_92 , __lowerCamelCase : Tuple=48 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=1e-6 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Union[str, Any]=True , **__lowerCamelCase : Optional[int] , ) -> List[Any]:
a = vocab_size
a = n_positions
a = n_embd
a = n_layer
a = n_head
a = dff
a = resid_pdrop
a = embd_pdrop
a = layer_norm_epsilon
a = initializer_range
a = use_cache
super().__init__(**__lowerCamelCase )
| 703 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum( A : int ) -> int:
    '''simple docstring'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )


def solution() -> int:
    '''simple docstring'''
    return sum(
        number
        for number in range(1000, 1000000 )
        if number == digits_fifth_powers_sum(number ) )


if __name__ == "__main__":
    print(solution())
| 662 | 0 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class snake_case__ (nn.Module ):
"""simple docstring"""
    def __init__( self , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ) -> Tuple:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict : bool = True , ) -> Any:
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        # blend the two residuals: mix_ratio weights the first condition's transformer
        # against the second before the shared input is added back in
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
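# Hedged usage note: pipelines drive this module by assigning mix_ratio,
# condition_lengths and transformer_index_for_condition directly. The defaults
# above (77 + 257 tokens) match CLIP text and image embedding lengths, as used
# by the dual-guided VersatileDiffusion pipeline.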
| 704 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester (unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ) -> str:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ) -> Any:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ) -> List[str]:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            # resize so the shorter side hits size["shortest_edge"], preserving aspect ratio
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[Any]:
        self.image_processor_tester = DetaImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Optional[Any]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Tuple:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Union[str, Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , True )
def __UpperCAmelCase ( self : Any ) -> int:
pass
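    # the method above is deliberately left as a no-op placeholder; batched
    # behaviour is exercised by the PIL, numpy and torch input tests below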
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest (nn.Module ):
    """simple docstring"""
    def __init__( self ) -> Dict:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ) -> List[str]:
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> List[str]:
a = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowerCamelCase , model.state_dict() )
a = os.path.join(__lowerCamelCase , "index.json" )
self.assertTrue(os.path.isfile(__lowerCamelCase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
a = os.path.join(__lowerCamelCase , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(__lowerCamelCase ) )
# TODO: add tests on the fact weights are properly loaded
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
a = torch.randn(2 , 3 , dtype=__lowerCamelCase )
with TemporaryDirectory() as tmp_dir:
a = offload_weight(__lowerCamelCase , "weight" , __lowerCamelCase , {} )
a = os.path.join(__lowerCamelCase , "weight.dat" )
self.assertTrue(os.path.isfile(__lowerCamelCase ) )
self.assertDictEqual(__lowerCamelCase , {"weight": {"shape": [2, 3], "dtype": str(__lowerCamelCase ).split("." )[1]}} )
a = load_offloaded_weight(__lowerCamelCase , index["weight"] )
self.assertTrue(torch.equal(__lowerCamelCase , __lowerCamelCase ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = ModelForTest()
a = model.state_dict()
a = {k: v for k, v in state_dict.items() if "linear2" not in k}
a = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowerCamelCase , __lowerCamelCase )
a = OffloadedWeightsLoader(state_dict=__lowerCamelCase , save_folder=__lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(__lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowerCamelCase , weight_map[key] ) )
a = {k: v for k, v in state_dict.items() if "weight" in k}
a = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowerCamelCase , __lowerCamelCase )
a = OffloadedWeightsLoader(state_dict=__lowerCamelCase , save_folder=__lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(__lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowerCamelCase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowerCamelCase , __lowerCamelCase )
# Duplicates are removed
a = OffloadedWeightsLoader(state_dict=__lowerCamelCase , save_folder=__lowerCamelCase )
# Every key is there with the right value
self.assertEqual(sorted(__lowerCamelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowerCamelCase , weight_map[key] ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = {"a.1": 0, "a.10": 1, "a.2": 2}
a = extract_submodules_state_dict(__lowerCamelCase , ["a.1", "a.2"] )
self.assertDictEqual(__lowerCamelCase , {"a.1": 0, "a.2": 2} )
a = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
a = extract_submodules_state_dict(__lowerCamelCase , ["a.1", "a.2"] )
self.assertDictEqual(__lowerCamelCase , {"a.1.a": 0, "a.2.a": 2} )
| 705 |
def cocktail_shaker_sort ( unsorted : list ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1, 0, -1 ):
        swapped = False
        # backward pass: carry the smallest remaining element toward the front
        for j in range(i, 0, -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1] , unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: carry the largest remaining element toward the back
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1] , unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 662 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings ( monkeypatch ):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set() )
@pytest.fixture
def mock_hfh ( monkeypatch ):
    class MetricMock:
        """simple docstring"""
        def __init__( self , metric_id ):
            self.id = metric_id
    class HfhMock:
        """simple docstring"""
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock() )
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning ( func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 706 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n    title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n    and Jeffrey Dean},\n    year={2016},\n    eprint={1609.08144},\n    archivePrefix={arXiv},\n    primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions : List[List[List[str]]] , references : List[List[str]] , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
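# Hedged usage sketch (assumes the `datasets` and `nltk` packages are installed;
# inputs are pre-tokenized lists, as in the docstring examples above):
#     google_bleu = datasets.load_metric("google_bleu")
#     score = google_bleu.compute(predictions=[["the", "cat", "sat"]],
#                                 references=[[["the", "cat", "sat"]]])["google_bleu"]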
| 662 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class snake_case__ (datasets.BuilderConfig ):
"""simple docstring"""
    features: Optional[datasets.Features] = None
class snake_case__ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info( self ) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ) -> Any:
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table : pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ) -> Any:
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
                yield i, self._cast_table(pa_table )
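# Hedged usage sketch: each data file is a pickled pandas DataFrame, e.g.
#     load_dataset("pandas", data_files={"train": "train.pkl"})
# which routes every file through pd.read_pickle -> pa.Table.from_pandas above.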
| 707 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
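        # the random arrays are channels-first uint8; np.moveaxis makes them HWC
        # so PIL can wrap them as RGB images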
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Optional[Any]:
        # the C-backed SentencePiece processor cannot be pickled; drop it here and
        # rebuild it in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> List[Any]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> Any:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> List[Any]:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
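# Hedged usage sketch (the class above is ReformerTokenizer in the original file):
#     tok = snake_case__("spiece.model")              # path to a SentencePiece model
#     pieces = tok._tokenize("Crime and Punishment")  # list of subword strings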
| 662 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
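# a deliberately tiny configuration: 4-dim embeddings with one layer and one
# attention head per side, which is what keeps all saved files in the ~60KB range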
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 709 |
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
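    # A state is "safe" when every process's outstanding need (maximum claim minus
    # current allocation) can be met, in some order, from the free resources; that
    # is exactly what the execution loop in the class above checks.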
| 662 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected ( file : Mock, sock : Mock ):
    '''simple docstring'''
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True )
    # ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 710 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets ( datasets : List[DatasetType], probabilities : Optional[List[float]] = None, seed : Optional[int] = None, info : Optional[DatasetInfo] = None, split : Optional[NamedSplit] = None, stopping_strategy : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
def concatenate_datasets ( dsets : List[DatasetType], info : Optional[DatasetInfo] = None, split : Optional[NamedSplit] = None, axis : int = 0, ):
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset, (Dataset, IterableDataset) ):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis )
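# Hedged usage sketch (ds1/ds2 stand for illustrative Dataset objects):
#     mixed = interleave_datasets([ds1, ds2], probabilities=[0.7, 0.3], seed=42)
#     merged = concatenate_datasets([ds1, ds2])
# both helpers refuse to mix map-style and iterable datasets, per the checks above.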
| 662 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width ( height : int, width : int, scale_factor : int = 8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
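# Worked example: height = width = 768 with scale_factor = 8 gives
# 768 // 8**2 == 12 with no remainder, so the function returns (96, 96):
# the latent grid is the pixel size divided by the movq scale factor.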
class snake_case__ (DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet : UNetaDConditionModel , scheduler : DDPMScheduler , movq : VQModel , ) -> List[str]:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> Tuple:
if latents is None:
a = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
a = latents.to(__lowerCamelCase )
a = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any]=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
a = torch.device(f"""cuda:{gpu_id}""" )
a = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any=0 ) -> str:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
a = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a = None
for cpu_offloaded_model in [self.unet, self.movq]:
a , a = cpu_offload_with_hook(__lowerCamelCase , __lowerCamelCase , prev_module_hook=__lowerCamelCase )
# We'll offload the last model manually.
a = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self : Union[str, Any] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 4.0 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[str, Any]:
a = self._execution_device
a = guidance_scale > 1.0
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = torch.cat(__lowerCamelCase , dim=0 )
a = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = torch.cat(__lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
a = image_embeds.repeat_interleave(__lowerCamelCase , dim=0 )
a = negative_image_embeds.repeat_interleave(__lowerCamelCase , dim=0 )
a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase , device=__lowerCamelCase )
a = self.scheduler.timesteps
a = self.unet.config.in_channels
a , a = downscale_height_and_width(__lowerCamelCase , __lowerCamelCase , self.movq_scale_factor )
# create initial latent
a = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a = {"image_embeds": image_embeds}
a = self.unet(
sample=__lowerCamelCase , timestep=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , added_cond_kwargs=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
if do_classifier_free_guidance:
a , a = noise_pred.split(latents.shape[1] , dim=1 )
a , a = noise_pred.chunk(2 )
a , a = variance_pred.chunk(2 )
a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a , a = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a = self.scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase , )[0]
# post-processing
a = self.movq.decode(__lowerCamelCase , force_not_quantize=__lowerCamelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
a = image * 0.5 + 0.5
a = image.clamp(0 , 1 )
a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
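# Hedged usage sketch for the pipeline above (it mirrors the Kandinsky 2.2
# decoder pipeline upstream; component construction and keyword names are
# illustrative, and the embeddings normally come from a separate prior pipeline):
#
#   pipe = snake_case__(unet=unet, scheduler=scheduler, movq=movq)
#   out = pipe(image_embeds, negative_image_embeds,
#              height=512, width=512, num_inference_steps=100, guidance_scale=4.0)
#   out.images[0].save("sample.png")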
| 711 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
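# Hedged illustration of the intended layouts above (toy ids, not the real
# BigBird vocabulary; upstream wraps pairs as [CLS] A [SEP] B [SEP]):
#
#   cls, sep = [65], [66]        # hypothetical [CLS] / [SEP] ids
#   a, b = [7, 8], [9]
#   cls + a + sep                # -> [65, 7, 8, 66]
#   cls + a + sep + b + sep      # -> [65, 7, 8, 66, 9, 66]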
| 662 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[Any] = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
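# Hedged sketch of what the lazy structure above buys: importing the package
# is cheap, and backend-specific classes are only materialized on first
# attribute access (module path per the usual transformers layout):
#
#   import importlib
#   distilbert = importlib.import_module("transformers.models.distilbert")
#   config = distilbert.DistilBertConfig()   # triggers the real submodule import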
| 712 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
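    # Hedged worked example of the chunking above: with max_length=512 and a
    # concatenated stream of 1300 tokens, total_length is truncated to
    # (1300 // 512) * 512 == 1024, producing two 512-token samples and
    # dropping the trailing 276 tokens.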
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
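# Hedged invocation example (the script filename is illustrative; the flags
# come from the parser defined above):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --max_length 512 \
#       --output_dir gs://my-tf-tpu-bucket   # bucket name is made up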
| 662 | 0 |
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : Dict = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """tapas"""
def __init__( self : int , __lowerCamelCase : Optional[Any]=3_05_22 , __lowerCamelCase : Optional[Any]=7_68 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : List[str]=30_72 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10_24 , __lowerCamelCase : Any=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Dict=1e-12 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Dict=10.0 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : int=1.0 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]=1.0 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Union[str, Any]="ratio" , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=64 , __lowerCamelCase : Any=32 , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=True , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , **__lowerCamelCase : Optional[Any] , ) -> Any:
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_sizes
a = initializer_range
a = layer_norm_eps
# Fine-tuning task hyperparameters
a = positive_label_weight
a = num_aggregation_labels
a = aggregation_loss_weight
a = use_answer_as_supervision
a = answer_loss_importance
a = use_normalized_answer_loss
a = huber_loss_delta
a = temperature
a = aggregation_temperature
a = use_gumbel_for_cells
a = use_gumbel_for_aggregation
a = average_approximation_function
a = cell_selection_preference
a = answer_loss_cutoff
a = max_num_rows
a = max_num_columns
a = average_logits_per_cell
a = select_one_column
a = allow_empty_column_selection
a = init_cell_selection_weights_to_zero
a = reset_position_index_per_cell
a = disable_per_token_loss
# Aggregation hyperparameters
a = aggregation_labels
a = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
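# Minimal usage sketch (hedged: the class is exported as TapasConfig upstream,
# and the aggregation labels below are illustrative):
#
#   config = TapasConfig(num_aggregation_labels=4,
#                        aggregation_labels={"0": "NONE", "1": "SUM", "2": "AVERAGE", "3": "COUNT"})
#   config.aggregation_labels[1]   # -> "SUM"; string keys are coerced to int above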
| 713 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
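# The ratio computed above is characters per token (stored as ratio_char_token
# upstream): e.g. a 120-character snippet tokenized into 40 ids gives
# 120 / 40 == 3.0, a rough measure of how well the tokenizer compresses code.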
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 662 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/'
__lowerCAmelCase : Optional[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key( key ):
    '''simple docstring'''
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight" )
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", "." )
    if key.endswith("k" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln", "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln", ".layer_norm" )
if "_ln" in key:
return key.replace("_ln", "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj", "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out", "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out", "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb", "embed_tokens" )
return key
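# Hedged worked examples for the renaming above (keys are illustrative):
#
#   replace_key("y_emb.lookup.weight")          # -> "metadata_embedding.lookup.weight"
#   replace_key("prior.x_out.weight")           # -> "prior.fc_proj_out.weight"
#   replace_key("bottleneck.level_blocks.0.k")  # -> "bottleneck.level_blocks.0.codebook"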
def _UpperCamelCase ( A : List[str], A : Any, A : List[str], A : List[Any] ):
'''simple docstring'''
a = {}
import re
a = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
a = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A ):
a = re_encoder_block_conv_in.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
a = re_encoder_block_conv_in.sub(A, A )
elif re_encoder_block_resnet.fullmatch(A ):
a = re_encoder_block_resnet.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] )
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_encoder_block_resnet.sub(A, A )
elif re_encoder_block_proj_out.fullmatch(A ):
a = re_encoder_block_proj_out.match(A )
a = regex_match.groups()
a = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
a = re_encoder_block_proj_out.sub(A, A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A ):
a = re_decoder_block_conv_out.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
a = re_decoder_block_conv_out.sub(A, A )
elif re_decoder_block_resnet.fullmatch(A ):
a = re_decoder_block_resnet.match(A )
a = regex_match.groups()
a = int(groups[2] ) * 2 + int(groups[3] ) - 2
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_decoder_block_resnet.sub(A, A )
elif re_decoder_block_proj_in.fullmatch(A ):
a = re_decoder_block_proj_in.match(A )
a = regex_match.groups()
a = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
a = re_decoder_block_proj_in.sub(A, A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A ):
a = re_prior_cond_conv_out.match(A )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
a = re_prior_cond_conv_out.sub(A, A )
elif re_prior_cond_resnet.fullmatch(A ):
a = re_prior_cond_resnet.match(A )
a = regex_match.groups()
a = int(groups[1] ) * 2 + int(groups[2] ) - 2
a = {"1": 1, "3": 2}[groups[-2]]
a = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
a = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a = prefix + resnet_block
a = re_prior_cond_resnet.sub(A, A )
elif re_prior_cond_proj_in.fullmatch(A ):
a = re_prior_cond_proj_in.match(A )
a = regex_match.groups()
a = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
a = re_prior_cond_proj_in.sub(A, A )
# keep original key
else:
a = original_key
a = replace_key(A )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
a = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
a = original_key
a = original_key
a = value
return new_dict
@torch.no_grad()
def _UpperCamelCase ( A : Optional[Any]=None, A : Tuple=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
a = requests.get(F"""{PREFIX}{file}""", allow_redirects=A )
os.makedirs(F"""{pytorch_dump_folder_path}/""", exist_ok=A )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""", "wb" ).write(r.content )
a = MODEL_MAPPING[model_name.split("/" )[-1]]
a = JukeboxConfig.from_pretrained(A )
a = JukeboxModel(A )
a = []
a = {}
for i, dict_name in enumerate(A ):
a = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
a = {}
for k in old_dic.keys():
if k.endswith(".b" ):
a = old_dic[k]
elif k.endswith(".w" ):
a = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a = old_dic[k]
else:
a = old_dic[k]
a = "vqvae" if i == 0 else F"""priors.{3 - i}"""
a = fix_jukebox_keys(A, model.state_dict(), A, A )
weight_dict.append(A )
a = weight_dict.pop(0 )
model.vqvae.load_state_dict(A )
for i in range(len(A ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A ).mkdir(exist_ok=A )
with open(F"""{pytorch_dump_folder_path}/mapping.json""", "w" ) as txtfile:
json.dump(A, A )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
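# Hedged invocation example (the script filename is illustrative; the flags
# and defaults come from the parser above):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted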
| 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]:
a = tokenizer
a = dataset
a = len(__lowerCamelCase ) if n_tasks is None else n_tasks
a = n_copies
def __iter__( self : Tuple ) -> str:
a = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]:
a = start_length
a = eof_strings
a = tokenizer
def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__lowerCamelCase )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a = re.split("(%s)" % "|".join(A ), A )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ):
'''simple docstring'''
a = defaultdict(A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A ) ):
with torch.no_grad():
a = batch["ids"].shape[-1]
a = accelerator.unwrap_model(A ).generate(
input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A )
# each task is generated batch_size times
a = batch["task_id"].repeat(A )
a = accelerator.pad_across_processes(
A, dim=1, pad_index=tokenizer.pad_token_id )
a , a = accelerator.gather((generated_tokens, generated_tasks) )
a = generated_tokens.cpu().numpy()
a = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A, A ):
gen_token_dict[task].append(A )
a = [[] for _ in range(A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A )
code_gens[task].append(remove_last_block(A ) )
return code_gens
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser(A )
a = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a = "false"
if args.num_workers is None:
a = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a = Accelerator()
set_seed(args.seed, device_specific=A )
# Load model and tokenizer
a = AutoTokenizer.from_pretrained(args.model_ckpt )
a = tokenizer.eos_token
a = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ),
}
# Load evaluation dataset and metric
a = load_dataset("openai_humaneval" )
a = load_metric("code_eval" )
a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
a = args.n_samples // args.batch_size
a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a = DataLoader(A, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a = code_eval_metric.compute(references=[""], predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
a , a = accelerator.prepare(A, A )
a = complete_code(
A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, )
if accelerator.is_main_process:
a = []
for task in tqdm(range(A ) ):
a = human_eval["test"][task]["test"]
a = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
a , a = code_eval_metric.compute(
references=A, predictions=A, num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file, "w" ) as fp:
json.dump(A, A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 662 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time( process_name: list, arrival_time: list, burst_time: list, no_of_process: int ):
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks finished processes:
    # 0 means the process has not finished yet, 1 means it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first unfinished process and fast-forward the clock if needed.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time( process_name: list, turn_around_time: list, burst_time: list, no_of_process: int ):
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
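# Hedged aside on the ratio used above: response ratio = (wait + burst) / burst,
# so a process that has waited 6 units with a 3-unit burst scores (6 + 3) / 3 = 3.0
# and is scheduled ahead of a fresh arrival whose ratio is 1.0.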
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
            # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
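# A minimal sketch of the offset-mapping behavior the assertions above encode,
# assuming the public "roberta-base" checkpoint; RobertaTokenizerFast accepts the
# same add_prefix_space / trim_offsets switches exercised in this test.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
enc = tok("hello world", return_offsets_mapping=True, add_special_tokens=False)
# trim_offsets decides whether the separating space is folded into the next
# token's span: (6, 11) for "world" here, but (5, 11) with trim_offsets=False.
print(enc.offset_mapping)  # [(0, 5), (6, 11)]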
| 662 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__lowerCAmelCase : int = NewType('DataClass', Any)
__lowerCAmelCase : Union[str, Any] = NewType('DataClassType', Any)
def __magic_name__ ( v : Tuple ):
    '''simple docstring'''
    if isinstance(v, bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __magic_name__ ( choices : list ):
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg, arg )
def __magic_name__ ( *,
aliases : Union[str, List[str]] = None, help : str = None, default : Any = dataclasses.MISSING, default_factory : Callable[[], Any] = dataclasses.MISSING, metadata : dict = None, **kwargs : Dict, ):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Iterable[DataClassType]
def __init__( self : Tuple , __lowerCamelCase : Union[DataClassType, Iterable[DataClassType]] , **__lowerCamelCase : Union[str, Any] ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
a = ArgumentDefaultsHelpFormatter
super().__init__(**__lowerCamelCase )
if dataclasses.is_dataclass(__lowerCamelCase ):
a = [dataclass_types]
a = list(__lowerCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowerCamelCase )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : ArgumentParser , __lowerCamelCase : dataclasses.Field ) -> Tuple:
a = f"""--{field.name}"""
a = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowerCamelCase ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
a = kwargs.pop("aliases" , [] )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
a = [aliases]
a = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__lowerCamelCase , "UnionType" ) and isinstance(__lowerCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
# filter `str` in Union
a = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
a = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
a = (
field.type.__args__[0] if isinstance(__lowerCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
a = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
a = {}
if origin_type is Literal or (isinstance(field.type , __lowerCamelCase ) and issubclass(field.type , __lowerCamelCase )):
if origin_type is Literal:
a = field.type.__args__
else:
a = [x.value for x in field.type]
a = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
a = field.default
else:
a = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
a = copy(__lowerCamelCase )
# Hack because type=bool in argparse does not behave as we want.
a = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
a = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
a = default
# This tells argparse we accept 0 or 1 value after --field_name
a = "?"
# This is the value that will get picked if we do --field_name (without value)
a = True
elif isclass(__lowerCamelCase ) and issubclass(__lowerCamelCase , __lowerCamelCase ):
a = field.type.__args__[0]
a = "+"
if field.default_factory is not dataclasses.MISSING:
a = field.default_factory()
elif field.default is dataclasses.MISSING:
a = True
else:
a = field.type
if field.default is not dataclasses.MISSING:
a = field.default
elif field.default_factory is not dataclasses.MISSING:
a = field.default_factory()
else:
a = True
parser.add_argument(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
a = False
parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : DataClassType ) -> int:
if hasattr(__lowerCamelCase , "_argument_group_name" ):
a = self.add_argument_group(dtype._argument_group_name )
else:
a = self
try:
a = get_type_hints(__lowerCamelCase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowerCamelCase ):
a = ".".join(map(__lowerCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__lowerCamelCase ):
if not field.init:
continue
a = type_hints[field.name]
self._parse_dataclass_field(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
a = []
if args_filename:
args_files.append(Path(__lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
a = ArgumentParser()
args_file_parser.add_argument(__lowerCamelCase , type=__lowerCamelCase , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
a , a = args_file_parser.parse_known_args(args=__lowerCamelCase )
a = vars(__lowerCamelCase ).get(args_file_flag.lstrip("-" ) , __lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__lowerCamelCase ) for p in cmd_args_file_paths] )
a = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
a = file_args + args if args is not None else file_args + sys.argv[1:]
a , a = self.parse_known_args(args=__lowerCamelCase )
a = []
for dtype in self.dataclass_types:
a = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
a = {k: v for k, v in vars(__lowerCamelCase ).items() if k in keys}
for k in keys:
delattr(__lowerCamelCase , __lowerCamelCase )
a = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict[str, Any] , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
a = set(args.keys() )
a = []
for dtype in self.dataclass_types:
a = {f.name for f in dataclasses.fields(__lowerCamelCase ) if f.init}
a = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
a = dtype(**__lowerCamelCase )
outputs.append(__lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowerCamelCase )}""" )
return tuple(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__lowerCamelCase ) , encoding="utf-8" ) as open_json_file:
a = json.loads(open_json_file.read() )
a = self.parse_dict(__lowerCamelCase , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Tuple[DataClass, ...]:
a = self.parse_dict(yaml.safe_load(Path(__lowerCamelCase ).read_text() ) , allow_extra_keys=__lowerCamelCase )
return tuple(__lowerCamelCase )
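# A usage sketch for the parser defined above, under its upstream name
# HfArgumentParser; the dataclass, flag names, and values are illustrative.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})

parser = HfArgumentParser(TrainingArgs)
# parse_args_into_dataclasses returns one instance per registered dataclass;
# a bare boolean flag works because the parser sets nargs="?" / const=True above.
(args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
print(args.learning_rate, args.do_eval)  # 0.0001 True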
| 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 662 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=99 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : Dict=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=50 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=None , ) -> Union[str, Any]:
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = initializer_range
a = use_labels
a = scope
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.get_config()
return config, input_ids, input_mask, token_labels
def __UpperCAmelCase ( self : Tuple ) -> int:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        a , a , a , a = self.prepare_config_and_inputs()
a = True
a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any , ) -> Optional[int]:
a = BertGenerationEncoder(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]:
a = True
a = BertGenerationEncoder(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , **__lowerCamelCase : str , ) -> Any:
a = True
a = True
a = BertGenerationDecoder(config=__lowerCamelCase ).to(__lowerCamelCase ).eval()
# first forward pass
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase , )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens and mask to input_ids and input_mask
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
a = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , *__lowerCamelCase : Dict , ) -> List[str]:
a = BertGenerationDecoder(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
a , a , a , a = self.prepare_config_and_inputs()
a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = BertGenerationEncoderTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> str:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
a , a , a , a = self.model_tester.prepare_config_and_inputs()
a = "bert"
self.model_tester.create_and_check_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# This regression test was failing with PyTorch < 1.3
        a , a , a , a , a , a = self.model_tester.prepare_config_and_inputs_for_decoder()
a = None
self.model_tester.create_and_check_model_as_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : str ) -> List[Any]:
a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
a = model(__lowerCamelCase )[0]
a = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , __lowerCamelCase )
a = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
a = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
a = model(__lowerCamelCase )[0]
a = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , __lowerCamelCase )
a = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
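# A short sketch mirroring the integration test above, assuming the upstream
# class name BertGenerationEncoder and the same public checkpoint.
import torch
from transformers import BertGenerationEncoder

encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    hidden = encoder(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 8, 1024]), per the slow test above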
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
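# With the lazy module above in place, BLIP classes resolve on first attribute
# access rather than at import time; a minimal sketch assuming this file is the
# blip package __init__ inside transformers, with an illustrative checkpoint id.
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")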
| 662 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = """microsoft/speecht5_tts"""
SCREAMING_SNAKE_CASE_ : Tuple = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
SCREAMING_SNAKE_CASE_ : Tuple = """text_reader"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = SpeechTaProcessor
SCREAMING_SNAKE_CASE_ : Optional[Any] = SpeechTaForTextToSpeech
SCREAMING_SNAKE_CASE_ : str = SpeechTaHifiGan
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""text"""]
SCREAMING_SNAKE_CASE_ : Dict = ["""audio"""]
def __UpperCAmelCase ( self : str ) -> List[str]:
if self.post_processor is None:
a = "microsoft/speecht5_hifigan"
super().setup()
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=None ) -> Optional[Any]:
a = self.pre_processor(text=__lowerCamelCase , return_tensors="pt" , truncation=__lowerCamelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
a = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
a = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[Any] ) -> Optional[int]:
with torch.no_grad():
return self.model.generate_speech(**__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
with torch.no_grad():
return self.post_processor(__lowerCamelCase ).cpu().detach()
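# A minimal sketch of driving the tool above through the agents helper, assuming
# the upstream loader name load_tool and the tool id "text-to-speech".
from transformers import load_tool

tts = load_tool("text-to-speech")
speech = tts("Hello, my dog is cute")  # encode -> generate_speech -> HiFi-GAN post-processing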
| 719 |
import math
import flax.linen as nn
import jax.numpy as jnp
def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
a = float(embedding_dim // 2 )
a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment )
a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 )
# scale embeddings
a = scale * emb
if flip_sin_to_cos:
a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 )
else:
a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 )
a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] )
return signal
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase )
a = nn.silu(__lowerCamelCase )
a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase )
return temb
class snake_case__ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 32
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : float = 1
@nn.compact
def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]:
return get_sinusoidal_embeddings(
__lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
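# A small numerical sketch of the helper defined at the top of this file, under
# the name used at its call site above (get_sinusoidal_embeddings); the shape
# follows the final reshape in that function.
import jax.numpy as jnp

timesteps = jnp.arange(4)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=8, flip_sin_to_cos=True, freq_shift=1)
print(emb.shape)  # (4, 8): cosine terms first, then sine terms, because of the flip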
| 662 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , *__lowerCamelCase : Dict , **__lowerCamelCase : Dict ) -> None:
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
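# Instantiating the shim above warns and then behaves exactly like
# VideoMAEImageProcessor; a minimal sketch, assuming the wrapper's upstream
# name VideoMAEFeatureExtractor.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = VideoMAEFeatureExtractor()
print(caught[0].category)  # FutureWarning, the category passed to warnings.warn above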
| 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> Dict:
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : int ) -> List[str]:
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
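# An end-to-end sketch of the processor exercised above, assuming the public
# Chinese-CLIP checkpoint id; the image is random, matching prepare_image_inputs.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
print(list(inputs.keys()))  # input_ids, token_type_ids, attention_mask, pixel_values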
| 662 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : Any=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Union[str, Any]=9 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Any=37 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.002 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=None , ) -> Union[str, Any]:
a = parent
a = batch_size
a = encoder_seq_length
a = decoder_seq_length
# For common tests
a = self.decoder_seq_length
a = is_training
a = use_attention_mask
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = d_ff
a = relative_attention_num_buckets
a = dropout_rate
a = initializer_factor
a = eos_token_id
a = pad_token_id
a = decoder_start_token_id
a = None
a = decoder_layers
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return TaConfig.from_pretrained("google/umt5-base" )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , ) -> Optional[Any]:
if attention_mask is None:
a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a = input_ids.clamp(self.pad_token_id + 1 )
a = decoder_input_ids.clamp(self.pad_token_id + 1 )
a = self.get_config()
a = config.num_attention_heads
a = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, input_dict
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
a , a = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self : str ) -> int:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , ) -> str:
a = UMTaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(
input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , )
a = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
a = result.last_hidden_state
a = result.past_key_values
a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , ) -> List[Any]:
a = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval()
# first forward pass
a = model(__lowerCamelCase , use_cache=__lowerCamelCase )
a = model(__lowerCamelCase )
a = model(__lowerCamelCase , use_cache=__lowerCamelCase )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 )
a , a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new tokens to input_ids
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = model(__lowerCamelCase )["last_hidden_state"]
a = model(__lowerCamelCase , past_key_values=__lowerCamelCase )["last_hidden_state"]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -1, random_slice_idx].detach()
a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : int , ) -> List[str]:
a = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval()
a = model(**__lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() )
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : str = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ : Tuple = [0.8, 0.9]
def __UpperCAmelCase ( self : Dict ) -> int:
a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __UpperCAmelCase ( self : int ) -> int:
a = self.model_tester.prepare_config_and_inputs()
a = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
a = self.model_tester.prepare_config_and_inputs()
a = config_and_inputs[0]
a = UMTaForConditionalGeneration(__lowerCamelCase ).eval()
model.to(__lowerCamelCase )
a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=__lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
}
for attn_name, (name, mask) in zip(__lowerCamelCase , head_masking.items() ):
a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__lowerCamelCase )
a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , **__lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __UpperCAmelCase ( self : str ) -> str:
a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__lowerCamelCase ).to(__lowerCamelCase )
a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__lowerCamelCase , legacy=__lowerCamelCase )
a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
a = tokenizer(__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase ).input_ids
# fmt: off
        a = torch.tensor(
            [
                [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
            ] )
# fmt: on
torch.testing.assert_allclose(__lowerCamelCase , __lowerCamelCase )
a = model.generate(input_ids.to(__lowerCamelCase ) )
a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
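# A generation sketch matching the integration test above, assuming the upstream
# class name UMT5ForConditionalGeneration and the google/umt5-small checkpoint.
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
batch = tokenizer("This is the reason why we <extra_id_0> them.", return_tensors="pt")
out = model.generate(batch.input_ids, max_length=20)
print(tokenizer.batch_decode(out))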
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0]
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple:
a = file_names
a = image_transform
a = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int:
a = self.file_names[idx]
a = PIL.Image.open(__lowerCamelCase )
a = raw_image.convert("RGB" )
if self.image_transform is not None:
a = self.image_transform(__lowerCamelCase )
a = extract_label(__lowerCamelCase )
if self.label_to_id is not None:
a = self.label_to_id[label]
return {"image": image, "label": label}
def __magic_name__ ( A : str, A : int ):
'''simple docstring'''
if args.with_tracking:
a = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir )
else:
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"] )
a = int(config["seed"] )
a = int(config["batch_size"] )
a = config["image_size"]
if not isinstance(A, (list, tuple) ):
a = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit" ):
if args.checkpointing_steps == "epoch":
a = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
a = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
a = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
a = os.path.split(A )[-1].split("." )[0]
accelerator.init_trackers(A, A )
# Grab all the image filenames
a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
a = [extract_label(A ) for fname in file_names]
a = list(set(A ) )
id_to_label.sort()
a = {lbl: i for i, lbl in enumerate(A )}
# Set the seed before splitting the data.
np.random.seed(A )
torch.manual_seed(A )
torch.cuda.manual_seed_all(A )
# Split our filenames between train and validation
a = np.random.permutation(len(A ) )
a = int(0.8 * len(A ) )
a = random_perm[:cut]
a = random_perm[cut:]
# For training we use a simple RandomResizedCrop
a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] )
a = PetsDataset(
[file_names[i] for i in train_split], image_transform=A, label_to_id=A )
# For evaluation, we use a deterministic Resize
a = Compose([Resize(A ), ToTensor()] )
a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A )
# Instantiate dataloaders.
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = create_model("resnet50d", pretrained=A, num_classes=len(A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
a = False
for param in model.get_classifier().parameters():
a = True
# We normalize the batches of images to be a bit faster.
a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 )
# Instantiate learning rate scheduler
a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
A, A, A, A, A )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the starting epoch so files are named properly
a = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
a = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
a = os.path.splitext(A )[0]
if "epoch" in training_difference:
a = int(training_difference.replace("epoch_", "" ) ) + 1
a = None
else:
a = int(training_difference.replace("step_", "" ) )
a = resume_step // len(A )
resume_step -= starting_epoch * len(A )
# Now we train the model
for epoch in range(A, A ):
model.train()
if args.with_tracking:
a = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
a = accelerator.skip_first_batches(A, A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
a = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
a = model(A )
a = torch.nn.functional.cross_entropy(A, batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A, A ):
a = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
model.eval()
a = 0
a = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
a = {k: v.to(accelerator.device ) for k, v in batch.items()}
a = (batch["image"] - mean) / std
with torch.no_grad():
a = model(A )
a = outputs.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch["label"]) )
a = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
a = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(A ),
"epoch": epoch,
}, step=A, )
if checkpointing_steps == "epoch":
a = F"""epoch_{epoch}"""
if args.output_dir is not None:
a = os.path.join(args.output_dir, A )
accelerator.save_state(A )
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir", required=A, help="The data folder on disk." )
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
parser.add_argument(
"--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", )
parser.add_argument(
"--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
parser.add_argument(
"--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
a = parser.parse_args()
a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(A, A )
if __name__ == "__main__":
main()
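# Illustrative launch command (my addition, not part of the original script);
# the script name, data path and flag values below are assumptions:
#
#   accelerate launch cv_example.py --data_dir ./pet_images \
#       --mixed_precision fp16 --checkpointing_steps epoch \
#       --output_dir ./checkpoints --with_tracking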
| 662 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case__ (TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : Union[str, Any] ) -> int:
super().__init__(features=__lowerCamelCase )
a = torch_tensor_kwargs
import torch # noqa import torch at initialization
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
import torch
if isinstance(__lowerCamelCase , __lowerCamelCase ) and column:
if all(
isinstance(__lowerCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(__lowerCamelCase )
return column
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
import torch
if isinstance(__lowerCamelCase , (str, bytes, type(__lowerCamelCase )) ):
return value
elif isinstance(__lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a = {}
if isinstance(__lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
a = {"dtype": torch.intaa}
elif isinstance(__lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__lowerCamelCase , PIL.Image.Image ):
a = np.asarray(__lowerCamelCase )
return torch.tensor(__lowerCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] ) -> List[Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(__lowerCamelCase , "__array__" ) and not isinstance(__lowerCamelCase , torch.Tensor ):
a = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__lowerCamelCase , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(__lowerCamelCase , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : dict ) -> Union[str, Any]:
return map_nested(self._recursive_tensorize , __lowerCamelCase , map_list=__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : pa.Table ) -> Mapping:
a = self.numpy_arrow_extractor().extract_row(__lowerCamelCase )
a = self.python_features_decoder.decode_row(__lowerCamelCase )
return self.recursive_tensorize(__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : pa.Table ) -> "torch.Tensor":
a = self.numpy_arrow_extractor().extract_column(__lowerCamelCase )
a = self.python_features_decoder.decode_column(__lowerCamelCase , pa_table.column_names[0] )
a = self.recursive_tensorize(__lowerCamelCase )
a = self._consolidate(__lowerCamelCase )
return column
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : pa.Table ) -> Mapping:
a = self.numpy_arrow_extractor().extract_batch(__lowerCamelCase )
a = self.python_features_decoder.decode_batch(__lowerCamelCase )
a = self.recursive_tensorize(__lowerCamelCase )
for column_name in batch:
a = self._consolidate(batch[column_name] )
return batch
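# Hedged usage sketch (my addition): this formatter is normally selected
# through `Dataset.with_format("torch")`; the toy dataset below is an assumed
# example.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#   ds[0]["x"]  # -> tensor([1, 2]), built by the row formatting above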
| 700 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
__lowerCAmelCase : Dict = '|'.join(sys.argv[1:])
__lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 662 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = input('Enter image url: ').strip()
print(F'''Downloading image from {url} ...''')
__lowerCAmelCase : str = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
__lowerCAmelCase : List[Any] = soup.find('meta', {'property': 'og:image'})['content']
__lowerCAmelCase : Dict = requests.get(image_url).content
__lowerCAmelCase : int = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
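# For reference (my addition): the tag this script extracts has the shape
# <meta property="og:image" content="https://example.com/preview.jpg"/>,
# where the URL is an illustrative placeholder.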
| 701 |
def __magic_name__ ( A : int, A : int, A : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
a = _modexpt(A, exponent // 2, A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A, exponent - 1, A )) % modulo_value
def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ):
'''simple docstring'''
a = base
for _ in range(1, A ):
a = _modexpt(A, A, 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
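# Sanity note (my addition): the first helper is square-and-multiply modular
# exponentiation, so for any exponent >= 1 it should agree with Python's
# built-in three-argument pow:
#   _modexpt(base, exponent, modulo_value) == pow(base, exponent, modulo_value)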
| 662 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__lowerCAmelCase : str = datasets.utils.logging.get_logger(__name__)
class snake_case__ (folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = None
SCREAMING_SNAKE_CASE_ : bool = None
class snake_case__ (folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = datasets.Audio()
SCREAMING_SNAKE_CASE_ : Any = """audio"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AudioFolderConfig
SCREAMING_SNAKE_CASE_ : List[str] # definition at the bottom of the script
SCREAMING_SNAKE_CASE_ : str = AudioClassification(audio_column="""audio""" , label_column="""label""" )
__lowerCAmelCase : Dict = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
__lowerCAmelCase : List[Any] = AUDIO_EXTENSIONS
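# Hedged usage sketch (my addition): AudioFolder is normally reached through
# `load_dataset`; the directory layout below is an assumption.
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_audio")
#   # expects my_audio/<label>/<clip>.wav and yields "audio" / "label" columns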
| 702 |
def __magic_name__ ( A : str, A : str ):
'''simple docstring'''
def get_matched_characters(A : str, A : str ) -> str:
a = []
a = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a = int(max(0, i - limit ) )
a = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A )
a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}"""
return "".join(A )
# matching characters
a = get_matched_characters(A, A )
a = get_matched_characters(A, A )
a = len(A )
# transposition
a = (
len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2
)
if not match_count:
a = 0.0
else:
a = (
1
/ 3
* (
match_count / len(A )
+ match_count / len(A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
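# Worked example (my addition) for the intended Jaro-Winkler metric: between
# "hello" and "world" only "l" matches inside the search window, so
# jaro = (1/5 + 1/5 + 1/1) / 3 = 7/15 ~= 0.4667, and with no common prefix the
# Winkler bonus leaves the score unchanged.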
| 662 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
__magic_name__ : Dict = HfArgumentParser(PretokenizationArguments)
__magic_name__ : str = parser.parse_args()
if args.num_workers is None:
__magic_name__ : List[Any] = multiprocessing.cpu_count()
__magic_name__ : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__magic_name__ : List[Any] = time.time()
__magic_name__ : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__magic_name__ : int = time.time()
__magic_name__ : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__magic_name__ : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
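# Note (my addition): the ratio computed inside the tokenize function above is
# characters per token, e.g. a 120-character example that tokenizes to 30 input
# ids gives 120 / 30 = 4.0; higher values mean stronger compression.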
| 703 |
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)}
def __magic_name__ ( A : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def __magic_name__ ( ):
'''simple docstring'''
return sum(
number
for number in range(1000, 1000000 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
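# Cross-check (my addition): 4150 is one of the numbers counted here, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.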
| 662 | 0 |
def __magic_name__ ( A : str, A : bool = False ):
'''simple docstring'''
if not isinstance(A, A ):
a = F"""Expected string as input, found {type(A )}"""
raise ValueError(A )
if not isinstance(A, A ):
a = F"""Expected boolean as use_pascal parameter, found {type(A )}"""
raise ValueError(A )
a = input_str.split("_" )
a = 0 if use_pascal else 1
a = words[start_index:]
a = [word[0].upper() + word[1:] for word in words_to_capitalize]
a = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
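# Expected behaviour (my addition), assuming the converter keeps the upstream
# contract:
#   "some_random_string"                      -> "someRandomString"
#   "some_random_string" with use_pascal=True -> "SomeRandomString"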
| 704 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
a = do_rescale
a = rescale_factor
a = do_pad
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]:
if not batched:
a = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size["shortest_edge"] * h / w )
a = self.size["shortest_edge"]
elif w > h:
a = self.size["shortest_edge"]
a = int(self.size["shortest_edge"] * w / h )
else:
a = self.size["shortest_edge"]
a = self.size["shortest_edge"]
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> int:
pass
def __UpperCAmelCase ( self : Any ) -> Any:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self : Any ) -> List[str]:
# Initialize image_processing
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
# prepare image and target
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"image_id": 3_97_69, "annotations": target}
# encode them
a = DetaImageProcessor()
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# prepare image, target and masks_path
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a = json.loads(f.read() )
a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a = DetaImageProcessor(format="coco_panoptic" )
a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 662 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
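# Note (my addition): the _LazyModule indirection above defers importing the
# heavy modeling code until an attribute such as TimesformerModel is first
# accessed, which keeps `import transformers` cheap.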
| 705 |
def __magic_name__ ( A : list ):
'''simple docstring'''
for i in range(len(A ) - 1, 0, -1 ):
a = False
for j in range(A, 0, -1 ):
if unsorted[j] < unsorted[j - 1]:
a , a = unsorted[j - 1], unsorted[j]
a = True
for j in range(A ):
if unsorted[j] > unsorted[j + 1]:
a , a = unsorted[j + 1], unsorted[j]
a = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
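# Example (my addition): each backward sweep moves the smallest remaining value
# left and each forward sweep moves the largest right, so e.g. [4, 5, 2, 1, 2]
# sorts to [1, 2, 2, 4, 5].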
| 662 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["""vqvae"""]
def __init__( self : Optional[int] , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Mel , __lowerCamelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> int:
return 50 if isinstance(self.scheduler , __lowerCamelCase ) else 10_00
@torch.no_grad()
def __call__( self : int , __lowerCamelCase : int = 1 , __lowerCamelCase : str = None , __lowerCamelCase : np.ndarray = None , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = None , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : List[Any]=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
a = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
a = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
a = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
a = noise
a = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
a = self.mel.audio_slice_to_image(__lowerCamelCase )
a = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
a = (input_image / 2_55) * 2 - 1
a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
a = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
a = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
a = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a = int(mask_start_secs * pixels_per_second )
a = int(mask_end_secs * pixels_per_second )
a = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
a = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
a = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
a = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
a = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
a = mask[:, step, :, :mask_start]
if mask_end > 0:
a = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
a = 1 / self.vqvae.config.scaling_factor * images
a = self.vqvae.decode(__lowerCamelCase )["sample"]
a = (images / 2 + 0.5).clamp(0 , 1 )
a = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
a = (images * 2_55).round().astype("uint8" )
a = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
a = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Image.Image] , __lowerCamelCase : int = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
a = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
a = (sample / 2_55) * 2 - 1
a = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
a = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a = self.scheduler.alphas_cumprod[t]
a = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a = 1 - alpha_prod_t
a = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
a = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : torch.Tensor , __lowerCamelCase : torch.Tensor , __lowerCamelCase : float ) -> torch.Tensor:
a = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
| 706 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def __UpperCAmelCase ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase )
}
| 662 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class snake_case__ (_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = """bit"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""preactivation""", """bottleneck"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""SAME""", """VALID"""]
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=64 , __lowerCamelCase : str=[2_56, 5_12, 10_24, 20_48] , __lowerCamelCase : Any=[3, 4, 6, 3] , __lowerCamelCase : List[str]="preactivation" , __lowerCamelCase : Union[str, Any]="relu" , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=32 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Any=1 , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , **__lowerCamelCase : Optional[Any] , ) -> Dict:
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
a = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
a = num_channels
a = embedding_size
a = hidden_sizes
a = depths
a = layer_type
a = hidden_act
a = global_padding
a = num_groups
a = drop_path_rate
a = embedding_dynamic_padding
a = output_stride
a = width_factor
a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
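# Hedged usage sketch (my addition): the defaults above correspond to a
# BiT-50 style backbone.
#
#   config = BitConfig()
#   config.layer_type    # -> "preactivation"
#   config.hidden_sizes  # -> [256, 512, 1024, 2048]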
| 707 |
import argparse
import os
import re
__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __magic_name__ ( A : int, A : bool = False ):
'''simple docstring'''
with open(A, "r", encoding="utf-8" ) as f:
a = f.read()
a = content.split("\n" )
a = []
a = 0
while line_idx < len(A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A, "w", encoding="utf-8" ) as f:
f.write("\n".join(A ) )
elif "\n".join(A ) != content:
return True
def __magic_name__ ( A : bool = False ):
'''simple docstring'''
a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]
if not overwrite and any(A ):
a = [f for f, d in zip(A, A ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
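# Typical invocations (my addition; the path is an assumption based on the
# transformers repo layout, where `make style` runs this script):
#   python utils/sort_auto_mappings.py               # sort mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report; raises if unsorted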
| 662 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : int = """BlipImageProcessor"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
a = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
a = self.image_processor
def __call__( self : List[str] , __lowerCamelCase : ImageInput = None , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : List[str] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
a = self.tokenizer
a = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
a = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
a = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
a = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def __UpperCAmelCase ( self : Optional[Any] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[Any] ) -> str:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : str ) -> List[Any]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 708 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = '▁'
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__lowerCAmelCase : Any = {
'google/reformer-crime-and-punishment': 52_4288,
}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 662 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Dict:
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
a = BlipProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def __UpperCAmelCase ( self : str , **__lowerCamelCase : List[Any] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Any ) -> int:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
a = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = self.prepare_image_inputs()
a = image_processor(__lowerCamelCase , return_tensors="np" )
a = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : Dict ) -> str:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : Any ) -> List[str]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__lowerCamelCase )
a = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
a = self.get_image_processor()
a = self.get_tokenizer()
a = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
a = "lower newer"
a = self.prepare_image_inputs()
a = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
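# Hedged usage sketch outside the test harness: the checkpoint name below is
# the public Salesforce BLIP captioning model, so running this needs network
# access; it is illustrative rather than part of the test suite.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import BlipProcessor
    blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    dummy_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = blip_processor(images=dummy_image, text="a photo of", return_tensors="np")
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']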
from __future__ import annotations
import time
import numpy as np
__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
a = claim_vector
a = allocated_resources_table
a = maximum_claim_table
def __UpperCAmelCase ( self : List[str] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __UpperCAmelCase ( self : str ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
a = self.__need()
a = self.__allocated_resources_table
a = self.__available_resources()
a = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
a = False
for each_need in need_list:
a = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
a = False
break
if execution:
a = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
a = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
a = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __UpperCAmelCase ( self : Any ) -> str:
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
+ " ".join(f"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
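    # Minimal self-contained sketch of the safety check the class above encodes;
    # the data passed below is illustrative, not the module-level test tables.
    def _is_safe_state(claim, alloc, maxima):
        # free resources = total claim minus everything currently allocated
        available = [c - sum(row[i] for row in alloc) for i, c in enumerate(claim)]
        # remaining need per process = maximum claim minus current allocation
        need = [[m - a for m, a in zip(mx, al)] for mx, al in zip(maxima, alloc)]
        pending = list(range(len(alloc)))
        while pending:
            runnable = [p for p in pending if all(n <= av for n, av in zip(need[p], available))]
            if not runnable:
                return False  # no pending process can finish: unsafe state
            done = runnable[0]
            # a finishing process releases its allocation back to the pool
            available = [av + al for av, al in zip(available, alloc[done])]
            pending.remove(done)
        return True
    assert _is_safe_state(
        [8, 5, 9, 7],
        [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3]],
        [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5]],
    )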
from __future__ import annotations
__lowerCAmelCase : int = '#'
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[Any] ) -> None:
a = {}
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str ) -> None:
a = self._trie
for char in text:
if char not in trie:
a = {}
a = trie[char]
a = True
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str ) -> tuple | list:
a = self._trie
for char in prefix:
if char in trie:
a = trie[char]
else:
return []
return self._elements(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : dict ) -> tuple:
a = []
for c, v in d.items():
a = [" "] if c == END else [(c + s) for s in self._elements(__lowerCamelCase )]
result.extend(__lowerCamelCase )
return tuple(__lowerCamelCase )
__lowerCAmelCase : str = Trie()
__lowerCAmelCase : Optional[int] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def __magic_name__ ( A : str ):
'''simple docstring'''
a = trie.find_word(A )
return tuple(string + word for word in suffixes )
def __magic_name__ ( ):
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
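# Note: find_word walks at most len(prefix) trie nodes before collecting every
# completion beneath that node, so a lookup costs O(len(prefix)) plus the size
# of the returned suffix set.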
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)
def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A, A, A, info=A, split=A, stopping_strategy=A )
def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(A ):
if not isinstance(A, (Dataset, IterableDataset) ):
if isinstance(A, (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"is an empty dataset dictionary." )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(A )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
a , a = (
(Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
)
elif not isinstance(A, A ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
else:
return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
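# Hedged usage sketch against the public `datasets` entry points (the data is
# illustrative); kept under a main guard so importing this module stays inert.
if __name__ == "__main__":
    from datasets import Dataset, concatenate_datasets, interleave_datasets
    d1 = Dataset.from_dict({"x": [0, 1, 2]})
    d2 = Dataset.from_dict({"x": [10, 11]})
    mixed = interleave_datasets(
        [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
    )
    combined = concatenate_datasets([d1, d2])  # 5 rows along axis 0
    print(mixed["x"], combined["x"])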
import json
import sys
def __magic_name__ ( A : str, A : List[str] ):
'''simple docstring'''
with open(A, encoding="utf-8" ) as f:
a = json.load(A )
a = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(A ):
a = results[benchmark_name]
a = benchmark_name.split("/" )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
a = "| metric |"
a = "|--------|"
a = "| new / old (diff) |"
for metric_name in sorted(A ):
a = benchmark_res[metric_name]
a = metric_vals["new"]
a = metric_vals.get("old", A )
a = metric_vals.get("diff", A )
a = F""" {new_val:f}""" if isinstance(A, (int, float) ) else "None"
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(A, (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(A, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(A, "w", encoding="utf-8" ) as f:
f.writelines("\n".join(A ) )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = sys.argv[1]
__lowerCAmelCase : Dict = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
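# Hedged example of the expected input shape, inferred from the parsing logic
# above (benchmark names and numbers are illustrative):
#     {
#       "benchmarks/generation.json": {
#         "tokens_per_sec": {"new": 105.2, "old": 98.7, "diff": 6.5}
#       }
#     }
# which renders a "### Benchmark: generation.json" section with one
# "new / old (diff)" column entry per metric.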
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase : List[str] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
__lowerCAmelCase : Any = '▁'
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer
SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
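# Hedged usage sketch via the public transformers entry point (downloading the
# checkpoint needs network access):
if __name__ == "__main__":
    from transformers import BigBirdTokenizerFast
    tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
    pair = tok("first segment", "second segment")
    # Pairs come back as [CLS] A [SEP] B [SEP], matching the special-token
    # layout implemented above.
    print(tok.convert_ids_to_tokens(pair["input_ids"]))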
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__lowerCAmelCase : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__lowerCAmelCase : str = {'facebook/blenderbot-3B': 128}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Dict = BlenderbotTokenizer
def __init__( self : Any , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]="replace" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Dict=False , __lowerCamelCase : int=True , **__lowerCamelCase : Optional[int] , ) -> List[Any]:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , )
a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
a = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
a = add_prefix_space
a = pre_tok_class(**__lowerCamelCase )
a = add_prefix_space
a = "post_processor"
a = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
if tokenizer_component_instance:
a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a = tuple(state["sep"] )
if "cls" in state:
a = tuple(state["cls"] )
a = False
if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
a = add_prefix_space
a = True
if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets:
a = trim_offsets
a = True
if changes_to_apply:
a = getattr(__lowerCamelCase , state.pop("type" ) )
a = component_class(**__lowerCamelCase )
setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Optional[int] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value
a = value
def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding:
a = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ) -> BatchEncoding:
a = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
a = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> str:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : "Conversation" ) -> List[int]:
a = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
a = " ".join(__lowerCamelCase )
a = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
a = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
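# Hedged sketch of the single-sequence layout implemented above: Blenderbot
# appends only the eos token, with no bos/cls prefix (needs network access to
# download the checkpoint):
if __name__ == "__main__":
    from transformers import BlenderbotTokenizerFast
    tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    ids = tok("Hello there").input_ids
    assert ids[-1] == tok.eos_token_id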
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __magic_name__ ( ):
'''simple docstring'''
a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
parser.add_argument(
"--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
parser.add_argument(
"--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", )
parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] )
parser.add_argument(
"--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", )
parser.add_argument(
"--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8.", )
parser.add_argument(
"--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket.", )
a = parser.parse_args()
return args
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
def fn(A : Tuple ):
return tokenizer(examples["text"] )
return fn
def __magic_name__ ( A : Any ):
'''simple docstring'''
a = []
for i in range(len(tokenized_data["input_ids"] ) ):
a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
a = tf.train.Features(feature=A )
a = tf.train.Example(features=A )
a = example.SerializeToString()
records.append(A )
return records
def __magic_name__ ( A : Union[str, Any] ):
'''simple docstring'''
a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
a = min(len(A ), args.limit )
a = dataset.select(range(A ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a = os.path.join(args.output_dir, args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
a = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
a = tokenize_function(A )
a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A : List[Any] ):
# Concatenate all texts.
a = {k: sum(examples[k], [] ) for k in examples.keys()}
a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a = {
k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 )
a = 0
a = 0
for shard in range(0, len(A ), args.shard_size ):
a = grouped_dataset[shard : shard + args.shard_size]
a = len(dataset_snapshot["input_ids"] )
a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
a = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
a = serialized_examples[i]
out_file.write(A )
print("Wrote file {} containing {} records".format(A, A ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""", "w" ) as f:
print(F"""Total {args.split} records: {total_records}""", file=A )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = parse_args()
main(args)
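    # Hedged read-back sketch for the shards written above; parses with
    # VarLenFeature since sample lengths are only fixed by --max_length, and
    # uses tf.io.gfile so gs:// output paths also work.
    shard_paths = tf.io.gfile.glob(os.path.join(args.output_dir, args.split, "*.tfrecord"))
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    parsed = tf.data.TFRecordDataset(shard_paths[:1]).map(
        lambda rec: tf.io.parse_single_example(rec, feature_spec)
    )
    for example in parsed.take(1):
        print({k: tf.sparse.to_dense(v).shape for k, v in example.items()})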
from typing import Any
import numpy as np
def __magic_name__ ( A : np.ndarray ):
'''simple docstring'''
return np.array_equal(A, matrix.conjugate().T )
def __magic_name__ ( A : np.ndarray, A : np.ndarray ):
'''simple docstring'''
a = v.conjugate().T
a = v_star.dot(A )
assert isinstance(A, np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __magic_name__ ( ):
'''simple docstring'''
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a = np.array([[1], [2], [3]] )
assert is_hermitian(A ), F"""{a} is not hermitian."""
print(rayleigh_quotient(A, A ) )
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), F"""{a} is not hermitian."""
assert rayleigh_quotient(A, A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
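    # Hedged cross-check sketch: for a Hermitian matrix, the Rayleigh quotient
    # of any nonzero vector lies between the smallest and largest eigenvalues
    # (the matrix and vector below are illustrative).
    m = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]], dtype=float)
    v = np.array([[1.0], [2.0], [3.0]])
    rq = (v.conj().T @ m @ v).item() / (v.conj().T @ v).item()
    evals = np.linalg.eigvalsh(m)
    assert evals[0] - 1e-9 <= rq <= evals[-1] + 1e-9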
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __magic_name__ ( A : List[str] ):
'''simple docstring'''
a = {}
a = tokenizer(example["content"], truncation=A )["input_ids"]
a = len(example["content"] ) / len(output["input_ids"] )
return output
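# Note: the second value computed above is the characters-per-token ratio of a
# sample; e.g. 1000 characters tokenized into 250 ids gives 4.0, and a higher
# ratio means the tokenizer compresses this corpus more efficiently.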
__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')