Dataset schema:
    code                      string   (81 to 54k characters)
    code_codestyle            int64    (0 to 721)
    style_context             string   (91 to 41.9k characters)
    style_context_codestyle   int64    (0 to 699)
    label                     int64    (0 to 1)
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
code_codestyle: 662
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
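The filter accepts only `.py` paths that sit under one of the requested top-level dirs; a quick sanity check of that regex with hypothetical inputs:

import re

joined_dirs = "|".join(["utils", "src"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

assert regex.match("src/transformers/modeling_utils.py")
assert regex.match("utils/get_modified_files.py")
assert not regex.match("examples/run_glue.py")  # top-level dir not requested
assert not regex.match("src/README.md")         # not a .py file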
style_context_codestyle: 662
label: 1
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
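A minimal instantiation sketch for the config class above; the overridden values are illustrative, not recommendations:

config = TapasConfig(
    num_aggregation_labels=4,
    use_answer_as_supervision=True,
    answer_loss_cutoff=0.664,  # illustrative value
)
print(config.model_type, config.num_aggregation_labels)  # tapas 4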
code_codestyle: 662
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base↑↑height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
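A quick sanity check: `_modexpt` agrees with Python's built-in three-argument `pow`, which computes the same modular exponentiation:

assert _modexpt(3, 4, 10) == pow(3, 4, 10) == 1
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)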
style_context_codestyle: 662
label: 1
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
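`_LazyModule` defers the real import until an exported name is first touched; a rough standalone sketch of the idea (simplified, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Module proxy that imports a submodule only when one of its names is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [exported names]} into {exported name: submodule}.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value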
code_codestyle: 662
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
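Two checkable values: the classic "martha"/"marhta" pair (six matches, one transposition, a three-character common prefix, giving 17/18 + 0.3·(1/18)) and the identity case:

assert abs(jaro_winkler("martha", "marhta") - 0.9611111111111111) < 1e-9
assert jaro_winkler("hello", "hello") == 1.0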
style_context_codestyle: 662
label: 1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 662
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
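4150 is one of the numbers the search counts, since 4**5 + 1**5 + 5**5 + 0**5 == 4150; most numbers, like 1000, are not:

assert digits_fifth_powers_sum(4150) == 4150
assert digits_fifth_powers_sum(1000) == 1  # 1**5 + 0 + 0 + 0, so 1000 is excluded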
style_context_codestyle: 662
label: 1
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
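A worked instance: the system x ≡ 2 (mod 3), x ≡ 3 (mod 5) has the unique solution 8 modulo 15, and both implementations agree:

assert chinese_remainder_theorem(3, 2, 5, 3) == 8
assert chinese_remainder_theorem2(3, 2, 5, 3) == 8
assert (3 * invert_modulo(3, 5)) % 5 == 1  # 2 is the inverse of 3 mod 5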
code_codestyle: 662
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
style_context_codestyle: 662
label: 1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
code_codestyle: 662
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the bidirectional bubble (cocktail shaker) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: sink the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: float the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
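The sort works in place and returns the same list object; a small check:

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([]) == []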
style_context_codestyle: 662
label: 1
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
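Malus's law gives I = I0 * cos^2(theta): a 100-unit beam through a polarizer at 60 degrees comes out at 25 units, and crossed polarizers (90 degrees) block it entirely up to floating-point noise:

assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9
assert malus_law(100.0, 90.0) < 1e-9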
code_codestyle: 662
from typing import Dict, List

import datasets
from datasets import MetricInfo
from nltk.translate import gleu_score


_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'

_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'

_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
style_context_codestyle: 662
label: 1
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
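This is the packaged builder behind `load_dataset("pandas", ...)`; a usage sketch with an illustrative file name:

import pandas as pd
from datasets import load_dataset

# Hypothetical file: any pickled DataFrame works.
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(ds[0])  # {'text': 'a', 'label': 0}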
code_codestyle: 662
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
style_context_codestyle: 662
label: 1
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
code_codestyle: 662
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
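A typical round trip with the checkpoint named in the map above (network access required to download the vocabulary):

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("It was a dark and stormy night.").input_ids
print(tok.decode(ids))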
style_context_codestyle: 662
label: 1
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def __magic_name__ ( A : str, A : Optional[Any], A : Optional[int]=8 ): '''simple docstring''' a = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : DDPMScheduler , __lowerCamelCase : VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=__lowerCamelCase , scheduler=__lowerCamelCase , movq=__lowerCamelCase , ) a = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Optional[int]: if latents is None: a = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) a = latents.to(__lowerCamelCase ) a = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str]=0 ) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a = torch.device(f"""cuda:{gpu_id}""" ) a = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any]=0 ) -> Union[str, Any]: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) a = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a = None for cpu_offloaded_model in [self.unet, self.movq]: a , a = cpu_offload_with_hook(__lowerCamelCase , __lowerCamelCase , prev_module_hook=__lowerCamelCase ) # We'll offload the last model manually. 
a = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self : List[str] ) -> List[str]: if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__lowerCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 4.0 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> List[Any]: a = self._execution_device a = guidance_scale > 1.0 if isinstance(__lowerCamelCase , __lowerCamelCase ): a = torch.cat(__lowerCamelCase , dim=0 ) if isinstance(__lowerCamelCase , __lowerCamelCase ): a = torch.cat(__lowerCamelCase , dim=0 ) if isinstance(__lowerCamelCase , __lowerCamelCase ): a = torch.cat(__lowerCamelCase , dim=0 ) a = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: a = image_embeds.repeat_interleave(__lowerCamelCase , dim=0 ) a = negative_image_embeds.repeat_interleave(__lowerCamelCase , dim=0 ) a = hint.repeat_interleave(__lowerCamelCase , dim=0 ) a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCamelCase ) a = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase , device=__lowerCamelCase ) a = self.scheduler.timesteps a = self.movq.config.latent_channels a , a = downscale_height_and_width(__lowerCamelCase , __lowerCamelCase , self.movq_scale_factor ) # create initial latent a = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a = {"image_embeds": image_embeds, "hint": hint} a = self.unet( sample=__lowerCamelCase , timestep=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , added_cond_kwargs=__lowerCamelCase , return_dict=__lowerCamelCase , )[0] if do_classifier_free_guidance: a , a = noise_pred.split(latents.shape[1] , dim=1 ) a , a = noise_pred.chunk(2 ) a , a = variance_pred.chunk(2 ) a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a = self.scheduler.step( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase , )[0] # post-processing a = self.movq.decode(__lowerCamelCase , 
force_not_quantize=__lowerCamelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: a = image * 0.5 + 0.5 a = image.clamp(0 , 1 ) a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase )
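The denoising loop above runs the U-Net on a doubled batch and then blends the unconditional and conditional noise predictions. A minimal sketch of just that blending step follows; the function name is ours, not part of the pipeline, and it assumes the batch was built as torch.cat([negative_embeds, positive_embeds]) so the unconditional half comes first, as in the code above.

```python
import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The U-Net ran on torch.cat([latents] * 2): the first half of the batch
    # holds the unconditional prediction, the second half the conditional one.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # Push the output away from the unconditional direction; guidance_scale > 1
    # strengthens conditioning, guidance_scale == 1 reproduces the conditional pass.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```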
662
from __future__ import annotations import time import numpy as np __lowerCAmelCase : List[str] = [8, 5, 9, 7] __lowerCAmelCase : str = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __lowerCAmelCase : Optional[Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : """simple docstring""" def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None: a = claim_vector a = allocated_resources_table a = maximum_claim_table def __UpperCAmelCase ( self : List[str] ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self : str ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self : Dict ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]: return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None: a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack a = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def __UpperCAmelCase ( self : Any ) -> str: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
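For reference, a driver for the Banker's algorithm class above would look roughly like the sketch below. The class name BankersAlgorithm and the entry point main(describe=True) are assumptions based on common algorithm collections, since the identifiers in the snippet above have been rewritten.

```python
# Hypothetical driver; class and method names are assumptions.
claim_vector = [8, 5, 9, 7]                              # total units of each resource
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3]]    # per-process current allocation
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5]]      # per-process maximum claim

BankersAlgorithm(claim_vector, allocated, maximum).main(describe=True)
```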
662
1
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class snake_case__ : """simple docstring""" def __init__( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : Any=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Union[str, Any]=9 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Any=37 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=0.002 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=None , ) -> Union[str, Any]: a = parent a = batch_size a = encoder_seq_length a = decoder_seq_length # For common tests a = self.decoder_seq_length a = is_training a = use_attention_mask a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = d_ff a = relative_attention_num_buckets a = dropout_rate a = initializer_factor a = eos_token_id a = pad_token_id a = decoder_start_token_id a = None a = decoder_layers def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: return TaConfig.from_pretrained("google/umt5-base" ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , ) -> Optional[Any]: if attention_mask is None: a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase ) if decoder_head_mask is None: a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase ) if cross_attn_head_mask is None: a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __UpperCAmelCase ( self : Optional[int] ) -> List[str]: a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # 
pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input a = input_ids.clamp(self.pad_token_id + 1 ) a = decoder_input_ids.clamp(self.pad_token_id + 1 ) a = self.get_config() a = config.num_attention_heads a = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, input_dict def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: a , a = self.prepare_config_and_inputs() return config, inputs_dict def __UpperCAmelCase ( self : str ) -> int: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , ) -> str: a = UMTaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model( input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , ) a = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase ) a = result.last_hidden_state a = result.past_key_values a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , ) -> List[Any]: a = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval() # first forward pass a = model(__lowerCamelCase , use_cache=__lowerCamelCase ) a = model(__lowerCamelCase ) a = model(__lowerCamelCase , use_cache=__lowerCamelCase ) self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) ) self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 ) a , a = 
outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and a = torch.cat([input_ids, next_tokens] , dim=-1 ) a = model(__lowerCamelCase )["last_hidden_state"] a = model(__lowerCamelCase , past_key_values=__lowerCamelCase )["last_hidden_state"] # select random slice a = ids_tensor((1,) , output_from_past.shape[-1] ).item() a = output_from_no_past[:, -1, random_slice_idx].detach() a = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) ) def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : int , ) -> List[str]: a = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval() a = model(**__lowerCamelCase )["last_hidden_state"] self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() ) @require_torch class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : int = (UMTaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : str = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = False SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : List[Any] = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = True # The small UMT5 model needs higher percentages for CPU/MP tests SCREAMING_SNAKE_CASE_ : Tuple = [0.8, 0.9] def __UpperCAmelCase ( self : Dict ) -> int: a = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def __UpperCAmelCase ( self : int ) -> int: a = self.model_tester.prepare_config_and_inputs() a = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def __UpperCAmelCase ( self : int ) -> Optional[int]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase ) def __UpperCAmelCase ( self : int ) -> Optional[int]: a = ["encoder_attentions", "decoder_attentions", "cross_attentions"] a = self.model_tester.prepare_config_and_inputs() a = config_and_inputs[0] a = UMTaForConditionalGeneration(__lowerCamelCase ).eval() model.to(__lowerCamelCase ) a = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=__lowerCamelCase ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ), } for attn_name, (name, mask) in zip(__lowerCamelCase , head_masking.items() 
): a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": a = torch.ones( config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ) a = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , **__lowerCamelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def __UpperCAmelCase ( self : Optional[Any] ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class snake_case__ (unittest.TestCase ): """simple docstring""" @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def __UpperCAmelCase ( self : str ) -> str: a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__lowerCamelCase ).to(__lowerCamelCase ) a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__lowerCamelCase , legacy=__lowerCamelCase ) a = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] a = tokenizer(__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase ).input_ids # fmt: off a = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(__lowerCamelCase , __lowerCamelCase ) a = model.generate(input_ids.to(__lowerCamelCase ) ) a = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. 
This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] a = tokenizer.batch_decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase )
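The decode-with-past test above boils down to one invariant: feeding tokens incrementally with a key/value cache must reproduce the full-sequence forward pass at the newest position. A standalone sketch of that check, with names of our choosing, assuming a Hugging Face style decoder whose outputs expose last_hidden_state and past_key_values:

```python
import torch

def caches_match(decoder, input_ids: torch.Tensor, next_tokens: torch.Tensor, atol: float = 1e-3) -> bool:
    # Full-sequence pass, no cache.
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    out_no_past = decoder(full_ids)["last_hidden_state"]
    # Incremental pass: run the prefix once, then only the new tokens plus the cache.
    past = decoder(input_ids, use_cache=True)["past_key_values"]
    out_past = decoder(next_tokens, past_key_values=past)["last_hidden_state"]
    # Only the newest position is comparable between the two runs.
    return torch.allclose(out_no_past[:, -1], out_past[:, -1], atol=atol)
```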
662
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowerCAmelCase : List[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset) def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) else: return _interleave_iterable_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ): '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A, info=A, split=A, axis=A ) else: return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
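For context, the public entry point these validations guard is typically used like this:

```python
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

# Sample 70/30 from the two datasets. "all_exhausted" keeps cycling the shorter
# dataset until every dataset has been fully consumed at least once, while the
# default "first_exhausted" stops as soon as any dataset runs out.
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.7, 0.3], seed=42, stopping_strategy="all_exhausted"
)
```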
662
1
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple: a = file_names a = image_transform a = label_to_id def __len__( self : Any ) -> Tuple: return len(self.file_names ) def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int: a = self.file_names[idx] a = PIL.Image.open(__lowerCamelCase ) a = raw_image.convert("RGB" ) if self.image_transform is not None: a = self.image_transform(__lowerCamelCase ) a = extract_label(__lowerCamelCase ) if self.label_to_id is not None: a = self.label_to_id[label] return {"image": image, "label": label} def __magic_name__ ( A : str, A : int ): '''simple docstring''' if args.with_tracking: a = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) a = config["image_size"] if not isinstance(A, (list, tuple) ): a = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit" ): if args.checkpointing_steps == "epoch": a = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): a = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: a = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: a = os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A, A ) # Grab all the image filenames a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences a = [extract_label(A ) for fname in file_names] a = list(set(A ) ) id_to_label.sort() a = {lbl: i for i, lbl in enumerate(A )} # Set the seed before splitting the data. np.random.seed(A ) torch.manual_seed(A ) torch.cuda.manual_seed_all(A ) # Split our filenames between train and validation a = np.random.permutation(len(A ) ) a = int(0.8 * len(A ) ) a = random_perm[:cut] a = random_perm[cut:] # For training we use a simple RandomResizedCrop a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] ) a = PetsDataset( [file_names[i] for i in train_split], image_transform=A, label_to_id=A ) # For evaluation, we use a deterministic Resize a = Compose([Resize(A ), ToTensor()] ) a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A ) # Instantiate dataloaders. 
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = create_model("resnet50d", pretrained=A, num_classes=len(A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): a = False for param in model.get_classifier().parameters(): a = True # We normalize the batches of images to be a bit faster. a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 ) # Instantiate learning rate scheduler a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( A, A, A, A, A ) # We need to keep track of how many total steps we have iterated over a = 0 # We also need to keep track of the starting epoch so files are named properly a = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) a = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` a = os.path.splitext(A )[0] if "epoch" in training_difference: a = int(training_difference.replace("epoch_", "" ) ) + 1 a = None else: a = int(training_difference.replace("step_", "" ) ) a = resume_step // len(A ) resume_step -= starting_epoch * len(A ) # Now we train the model for epoch in range(A, A ): model.train() if args.with_tracking: a = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step a = accelerator.skip_first_batches(A, A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader a = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std a = model(A ) a = torch.nn.functional.cross_entropy(A, batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A, A ): a = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) model.eval() a = 0 a = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std with torch.no_grad(): a = model(A ) a = outputs.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["label"]) ) a = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() a = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(A ), "epoch": epoch, }, step=A, ) if checkpointing_steps == "epoch": a = F"""epoch_{epoch}""" if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) if args.with_tracking: accelerator.end_training() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir", required=A, help="The data folder on disk." ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) a = parser.parse_args() a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(A, A ) if __name__ == "__main__": main()
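The core transfer-learning setup in this script, freezing the timm backbone and training only the classifier head, can be isolated into a few lines. In this sketch the class count and learning rate are placeholders; the division by 25 mirrors how the script derives the optimizer rate from the OneCycleLR peak.

```python
import torch
from timm import create_model

model = create_model("resnet50d", pretrained=True, num_classes=2)  # placeholder class count
for param in model.parameters():
    param.requires_grad = False            # freeze the backbone
for param in model.get_classifier().parameters():
    param.requires_grad = True             # train only the classification head
optimizer = torch.optim.Adam(model.parameters(), lr=3e-2 / 25)
```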
662
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __lowerCAmelCase : Any = '▁' class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : List[int] = [] def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
662
1
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: a = ["a", "b", "c"] # Defaults to last layer if both are None a , a = get_aligned_output_features_output_indices(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , ["c"] ) self.assertEqual(__lowerCamelCase , [2] ) # Out indices set to match out features a , a = get_aligned_output_features_output_indices(["a", "c"] , __lowerCamelCase , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , ["a", "c"] ) self.assertEqual(__lowerCamelCase , [0, 2] ) # Out features set to match out indices a , a = get_aligned_output_features_output_indices(__lowerCamelCase , [0, 2] , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , ["a", "c"] ) self.assertEqual(__lowerCamelCase , [0, 2] ) # Out features selected from negative indices a , a = get_aligned_output_features_output_indices(__lowerCamelCase , [-3, -1] , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , ["a", "c"] ) self.assertEqual(__lowerCamelCase , [-3, -1] ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: # Stage names must be set with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 1) , __lowerCamelCase ) # Out features must be a list with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(__lowerCamelCase , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(__lowerCamelCase , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(__lowerCamelCase ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def __UpperCAmelCase ( self : Tuple ) -> List[Any]: a = BackboneMixin() a = ["a", "b", "c"] a = ["a", "c"] a = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly a = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) a = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
662
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) a = parser.parse_args() return args def __magic_name__ ( A : List[str] ): '''simple docstring''' def fn(A : Tuple ): return tokenizer(examples["text"] ) return fn def __magic_name__ ( A : Any ): '''simple docstring''' a = [] for i in range(len(tokenized_data["input_ids"] ) ): a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } a = tf.train.Features(feature=A ) a = tf.train.Example(features=A ) a = example.SerializeToString() records.append(A ) return records def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: a = min(len(A ), args.limit ) a = dataset.select(range(A ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a = os.path.join(args.output_dir, args.split ) if not os.path.exists(A ): os.makedirs(A ) else: a = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. a = tokenize_function(A ) a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. 
The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : List[Any] ): # Concatenate all texts. a = {k: sum(examples[k], [] ) for k in examples.keys()} a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a = { k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )] for k, t in concatenated_examples.items() } return result a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 ) a = 0 a = 0 for shard in range(0, len(A ), args.shard_size ): a = grouped_dataset[shard : shard + args.shard_size] a = len(dataset_snapshot["input_ids"] ) a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) a = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): a = serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A, A ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""", "w" ) as f: print(F"""Total {args.split} records: {total_records}""", file=A ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = parse_args() main(args)
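The concatenate-then-chunk step described in the comments is easiest to see on a toy batch; here max_length is 4 instead of 512 for readability:

```python
examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
max_length = 4

concatenated = {k: sum(examples[k], []) for k in examples}  # [1, 2, ..., 10]
# Drop the small remainder: 10 tokens -> 8, so the tail [9, 10] is discarded.
total_length = (len(concatenated["input_ids"]) // max_length) * max_length
chunks = {
    k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
    for k, t in concatenated.items()
}
assert chunks == {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}
```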
662
1
def dodecahedron_surface_area(edge: float) -> float:
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
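As a quick sanity check, a unit-edge regular dodecahedron has surface area 3*sqrt(25 + 10*sqrt(5)) ≈ 20.6457 and volume (15 + 7*sqrt(5))/4 ≈ 7.6631, which the functions above reproduce:

```python
assert abs(dodecahedron_surface_area(1) - 20.6457) < 1e-4
assert abs(dodecahedron_volume(1) - 7.6631) < 1e-4
```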
662
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash',
        'line_mean', 'line_max', 'alpha_frac', 'autogenerated',
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
662
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : str = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]: a = tokenizer a = dataset a = len(__lowerCamelCase ) if n_tasks is None else n_tasks a = n_copies def __iter__( self : Tuple ) -> str: a = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]: a = start_length a = eof_strings a = tokenizer def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]: a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCamelCase ) def __magic_name__ ( A : List[Any] ): '''simple docstring''' a = re.split("(%s)" % "|".join(A ), A ) # last string should be "" return "".join(string_list[:-2] ) def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ): '''simple docstring''' a = defaultdict(A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A ) ): with torch.no_grad(): a = batch["ids"].shape[-1] a = accelerator.unwrap_model(A ).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A ) # each task is generated batch_size times a = batch["task_id"].repeat(A ) a = accelerator.pad_across_processes( A, dim=1, pad_index=tokenizer.pad_token_id ) a , a = accelerator.gather((generated_tokens, generated_tasks) ) a = generated_tokens.cpu().numpy() a = generated_tasks.cpu().numpy() for task, generated_tokens in zip(A, A ): gen_token_dict[task].append(A ) a = [[] for _ in range(A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A ) code_gens[task].append(remove_last_block(A ) ) return code_gens def __magic_name__ ( ): '''simple docstring''' a = HfArgumentParser(A ) a = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing a = "false" if 
args.num_workers is None: a = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a = Accelerator() set_seed(args.seed, device_specific=A ) # Load model and tokenizer a = AutoTokenizer.from_pretrained(args.model_ckpt ) a = tokenizer.eos_token a = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ), } # Load evaluation dataset and metric a = load_dataset("openai_humaneval" ) a = load_metric("code_eval" ) a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) a = args.n_samples // args.batch_size a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A ) # do not confuse args.batch_size, which is actually the num_return_sequences a = DataLoader(A, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a = code_eval_metric.compute(references=[""], predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception a , a = accelerator.prepare(A, A ) a = complete_code( A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, ) if accelerator.is_main_process: a = [] for task in tqdm(range(A ) ): a = human_eval["test"][task]["test"] a = F"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric a , a = code_eval_metric.compute( references=A, predictions=A, num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, "w" ) as fp: json.dump(A, A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
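The pass@k computation at the end relies on the `code_eval` metric, which actually executes the generated code and therefore has to be explicitly enabled, exactly as the quick test in the script probes for. A minimal standalone run on a toy problem of our own:

```python
import os
os.environ["HF_ALLOW_CODE_EVAL"] = "1"  # opt in: the metric executes untrusted code

from datasets import load_metric

code_eval = load_metric("code_eval")
pass_at_k, results = code_eval.compute(
    references=["assert add(2, 3) == 5"],
    predictions=[["def add(a, b):\n    return a + b"]],
    k=[1],
)
print(pass_at_k)  # {'pass@1': 1.0}
```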
662
1
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating examples between them."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). "
                "Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Convert a list of datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). "
                "Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
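# A minimal usage sketch for the two combinators above, assuming the public
# `datasets` API (Dataset.from_dict plus the functions defined in this module):
from datasets import Dataset

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})

# Alternate examples between the sources; "all_exhausted" oversamples the
# shorter sources until every source has been fully consumed at least once.
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
                            stopping_strategy="all_exhausted")

combined = concatenate_datasets([d1, d2])  # 6 rows, stacked along axis 0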
662
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
662
1
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
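# Quick sanity check for new_generation above (a minimal sketch, assuming the
# reconstructed module is importable): the blinker is a period-2 oscillator,
# so two steps reproduce the starting grid.
once = new_generation(BLINKER)
assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(once) == BLINKER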
662
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast SCREAMING_SNAKE_CASE_ : str = True def __UpperCAmelCase ( self : Optional[int] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: pass def __UpperCAmelCase ( self : int ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
662
1
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with unit
    squares and coloured tiles of length 2, 3 or 4 (Project Euler 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
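# Cross-check of the recurrence above with a direct recursion (a hypothetical
# helper for illustration; `brute_force` is not part of the original file).
def brute_force(length: int) -> int:
    """Count tilings of a 1 x length row with unit squares and 2/3/4-long tiles."""
    if length < 0:
        return 0
    if length == 0:
        return 1
    # The row starts either with a unit square or with a tile of length 2-4.
    return brute_force(length - 1) + sum(brute_force(length - t) for t in range(2, 5))


assert all(brute_force(n) == solution(n) for n in range(12))
print(brute_force(5))  # 15, the worked example from Project Euler 117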
662
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
662
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __lowerCAmelCase : Any = 25_0004 __lowerCAmelCase : Optional[Any] = 25_0020 @require_sentencepiece @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = MBartaaTokenizer SCREAMING_SNAKE_CASE_ : Union[str, Any] = MBartaaTokenizerFast SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : int = True def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing a = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : int ) -> Any: a = "<s>" a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> Union[str, Any]: a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 10_54 ) def __UpperCAmelCase ( self : str ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: a = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) a = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) a = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __UpperCAmelCase ( self : List[Any] ) -> int: # fmt: off a = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__lowerCamelCase ) a = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__lowerCamelCase ) a = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=True a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) a = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__lowerCamelCase ) a = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=False a = tempfile.mkdtemp() a = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) a = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way a = tokenizer_r.from_pretrained(__lowerCamelCase ) a = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class snake_case__ (unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = """facebook/mbart-large-50-one-to-many-mmt""" SCREAMING_SNAKE_CASE_ : Any = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will 
only worsen the violence and misery for millions of people.""", ] SCREAMING_SNAKE_CASE_ : Optional[Any] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] SCREAMING_SNAKE_CASE_ : Any = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2] @classmethod def __UpperCAmelCase ( cls : Optional[int] ) -> str: a = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) a = 1 return cls def __UpperCAmelCase ( self : Optional[Any] ) -> str: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 ) def __UpperCAmelCase ( self : str ) -> Union[str, Any]: a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) a = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] a = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ) -> Any: a = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , __lowerCamelCase ) a = 10 a = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0] self.assertEqual(ids[0] , __lowerCamelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> int: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: a = tempfile.mkdtemp() a = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCamelCase ) a = MBartaaTokenizer.from_pretrained(__lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase ) @require_torch def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" ) a = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: a = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) a = shift_tokens_right(batch["labels"] , 
self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) a = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __UpperCAmelCase ( self : List[str] ) -> str: a = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" ) a = self.tokenizer( text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors="pt" ) a = targets["input_ids"] a = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __UpperCAmelCase ( self : str ) -> Optional[Any]: a = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(__lowerCamelCase ) , { # en_XX, A, test, EOS "input_ids": [[25_00_04, 62, 30_34, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 25_00_01, } , )
662
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Dict = { 'configuration_blip': [ 'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlipConfig', 'BlipTextConfig', 'BlipVisionConfig', ], 'processing_blip': ['BlipProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = [ 'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys __lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __lowerCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] __lowerCAmelCase : Tuple = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] __lowerCAmelCase : Tuple = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) __lowerCAmelCase : Tuple = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) __lowerCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def __magic_name__ ( A : Any, A : Optional[int] ): '''simple docstring''' for tf_name, hf_name in patterns: a = k.replace(A, A ) return k def __magic_name__ ( A : dict, A : dict ): '''simple docstring''' a = BigBirdPegasusConfig(**A ) a = BigBirdPegasusForConditionalGeneration(A ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )} for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ): a = [k.endswith(A ) for ending in KEYS_TO_IGNORE] if any(A ): continue a = DECODER_PATTERNS a = rename_state_dict_key(A, A ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): a = v.T a = torch.from_numpy(A ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ): a = [k.endswith(A ) for ending in KEYS_TO_IGNORE] if any(A ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(A, A ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ["dense", "query", "key", "value"] ): a = v.T a = torch.from_numpy(A ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping["model.embed_positions.weight"] a = mapping.pop("model.embed_positions.weight" ) a , a = torch_model.load_state_dict(A, strict=A ) a = [ k for k in missing if k not in [ "final_logits_bias", "model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight", ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def __magic_name__ ( A : Dict ): '''simple docstring''' a = tf.train.list_variables(A ) a = {} a = ["global_step"] for name, shape in tqdm(A, desc="converting tf checkpoint to dict" ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(A, A ) a = array return tf_weights def __magic_name__ ( A : str, A : str, A : dict ): '''simple docstring''' a = get_tf_weights_as_numpy(A ) a = convert_bigbird_pegasus(A, A ) torch_model.save_pretrained(A ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') __lowerCAmelCase : Dict = parser.parse_args() __lowerCAmelCase : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
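# Small illustration of the pattern-based renaming the conversion script
# relies on (hypothetical TF key, using the DECODER_PATTERNS defined above):
tf_key = "pegasus/decoder/layer_0/attention/self/query/kernel"
hf_key = rename_state_dict_key(tf_key, DECODER_PATTERNS)
# '/'->'.' is applied first, then 'layer_'->'layers.', 'kernel'->'weight',
# 'pegasus'->'model', 'attention.self'->'self_attn' and 'query'->'q_proj'
print(hf_key)  # model.decoder.layers.0.self_attn.q_proj.weight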
662
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
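# A minimal usage sketch for the embedding helpers above (assuming jax and
# flax are installed; shapes follow directly from the definitions):
import jax

timesteps = jnp.array([0.0, 10.0, 500.0], dtype=jnp.float32)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
print(emb.shape)  # (3, 32)

# Project the sinusoidal features through the two-layer MLP head.
mlp = FlaxTimestepEmbedding(time_embed_dim=128)
params = mlp.init(jax.random.PRNGKey(0), emb)
print(mlp.apply(params, emb).shape)  # (3, 128)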
662
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __lowerCAmelCase : List[Any] = logging.get_logger(__name__) @dataclass class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: a = deprecated_arg[3:] a = not kwargs.pop(__lowerCamelCase ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) a = kwargs.pop("tpu_name" , self.tpu_name ) a = kwargs.pop("device_idx" , self.device_idx ) a = kwargs.pop("eager_mode" , self.eager_mode ) a = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE_ : str = field( default=_UpperCamelCase , metadata={"""help""": """Name of TPU"""} , ) SCREAMING_SNAKE_CASE_ : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) SCREAMING_SNAKE_CASE_ : bool = field(default=_UpperCamelCase , metadata={"""help""": """Benchmark models in eager model."""} ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={ """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def __UpperCAmelCase ( self : Tuple ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) a = None if self.tpu: try: if self.tpu_name: a = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: a = None return tpu @cached_property def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) a = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) a = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU a = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def __UpperCAmelCase ( self : Any ) -> bool: requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __UpperCAmelCase ( self : Optional[Any] ) -> "tf.distribute.Strategy": requires_backends(self , ["tf"] ) return self._setup_strategy @property def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __UpperCAmelCase ( self : int ) -> int: requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __UpperCAmelCase ( self : List[str] ) -> bool: return self.n_gpu > 0
662
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> Dict: a = tempfile.mkdtemp() a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) a = { "do_resize": True, "size": {"height": 2_24, "width": 2_24}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], "do_convert_rgb": True, } a = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : int ) -> List[str]: a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase ) a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , 
__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) a = self.get_image_processor(do_normalize=__lowerCamelCase ) a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = self.prepare_image_inputs() a = image_processor(__lowerCamelCase , return_tensors="np" ) a = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = processor(text=__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : List[Any] ) -> Any: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__lowerCamelCase ) a = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> List[str]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
662
1
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
662
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple: a = file_names a = image_transform a = label_to_id def __len__( self : Any ) -> Tuple: return len(self.file_names ) def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int: a = self.file_names[idx] a = PIL.Image.open(__lowerCamelCase ) a = raw_image.convert("RGB" ) if self.image_transform is not None: a = self.image_transform(__lowerCamelCase ) a = extract_label(__lowerCamelCase ) if self.label_to_id is not None: a = self.label_to_id[label] return {"image": image, "label": label} def __magic_name__ ( A : str, A : int ): '''simple docstring''' if args.with_tracking: a = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) a = config["image_size"] if not isinstance(A, (list, tuple) ): a = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit" ): if args.checkpointing_steps == "epoch": a = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): a = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: a = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: a = os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A, A ) # Grab all the image filenames a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences a = [extract_label(A ) for fname in file_names] a = list(set(A ) ) id_to_label.sort() a = {lbl: i for i, lbl in enumerate(A )} # Set the seed before splitting the data. np.random.seed(A ) torch.manual_seed(A ) torch.cuda.manual_seed_all(A ) # Split our filenames between train and validation a = np.random.permutation(len(A ) ) a = int(0.8 * len(A ) ) a = random_perm[:cut] a = random_perm[cut:] # For training we use a simple RandomResizedCrop a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] ) a = PetsDataset( [file_names[i] for i in train_split], image_transform=A, label_to_id=A ) # For evaluation, we use a deterministic Resize a = Compose([Resize(A ), ToTensor()] ) a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A ) # Instantiate dataloaders. 
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = create_model("resnet50d", pretrained=A, num_classes=len(A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): a = False for param in model.get_classifier().parameters(): a = True # We normalize the batches of images to be a bit faster. a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 ) # Instantiate learning rate scheduler a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( A, A, A, A, A ) # We need to keep track of how many total steps we have iterated over a = 0 # We also need to keep track of the starting epoch so files are named properly a = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) a = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` a = os.path.splitext(A )[0] if "epoch" in training_difference: a = int(training_difference.replace("epoch_", "" ) ) + 1 a = None else: a = int(training_difference.replace("step_", "" ) ) a = resume_step // len(A ) resume_step -= starting_epoch * len(A ) # Now we train the model for epoch in range(A, A ): model.train() if args.with_tracking: a = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step a = accelerator.skip_first_batches(A, A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader a = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std a = model(A ) a = torch.nn.functional.cross_entropy(A, batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A, A ): a = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) model.eval() a = 0 a = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std with torch.no_grad(): a = model(A ) a = outputs.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["label"]) ) a = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() a = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(A ), "epoch": epoch, }, step=A, ) if checkpointing_steps == "epoch": a = F"""epoch_{epoch}""" if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) if args.with_tracking: accelerator.end_training() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir", required=A, help="The data folder on disk." ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) a = parser.parse_args() a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(A, A ) if __name__ == "__main__": main()
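# A typical way to launch the training script above (a sketch; the file name
# cv_example.py is an assumption, and `accelerate` must be configured first):
#
#     accelerate launch cv_example.py --data_dir ./images --with_tracking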
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase ) }
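# A quick sanity check of the metric above, calling nltk's gleu_score directly (the
# same function the _compute method wraps). The token lists are toy data.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
references = [[["the", "cat", "is", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(list_of_references=references, hypotheses=hypotheses, min_len=1, max_len=4)
print(round(score, 2))  # in [0, 1]; higher means more n-gram overlap with the reference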
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
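# A small illustration of the filtering regex built above: invoked as
# `python ./utils/get_modified_files.py utils src tests`, joined_dirs becomes
# "utils|src|tests" and only .py paths under those top-level dirs survive.
# The candidate file names below are made up.
import re

regex = re.compile(r"^(utils|src|tests).*?\.py$")
candidates = ["src/transformers/foo.py", "docs/readme.md", "tests/test_foo.py", "setup.py"]
print([x for x in candidates if regex.match(x)])  # ['src/transformers/foo.py', 'tests/test_foo.py']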
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # iterate the exponent tower, keeping only the last `digits` digits at each step
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
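# _modexpt is ordinary modular exponentiation by squaring, so it should agree with
# Python's built-in three-argument pow; a quick self-check:
assert _modexpt(3, 4, 100) == pow(3, 4, 100) == 81
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)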
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
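# A sketch of what the lazy module above enables: importing BLIP classes from the
# package root without paying the torch/TF import cost until a symbol is touched.
# Assumes a standard transformers install; the checkpoint name is only an example.
from transformers import BlipForConditionalGeneration, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")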
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings, in [0, 1]."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            # a character only counts as matched inside a window of +/- limit around i
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
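# Sample calls: identical strings score 1.0 and fully disjoint strings score 0.0;
# intermediate values depend on this implementation's matching window, so they are
# printed rather than asserted.
print(jaro_winkler("hello", "hello"))  # 1.0
print(jaro_winkler("martha", "marhta"))  # high: one transposition plus a shared prefix
print(jaro_winkler("abc", "xyz"))  # 0.0 - no characters match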
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
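# Two worked examples of the conversion above (using the function name from the
# reconstructed definition):
print(snake_to_camel_case("some_random_variable"))  # someRandomVariable
print(snake_to_camel_case("some_random_variable", use_pascal=True))  # SomeRandomVariable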
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    # 1000000 is a safe upper bound: no 7-digit number can qualify,
    # since 7 * 9**5 = 413343 < 1000000
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
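# A hand-checked instance of the property being summed over:
# 4151 = 4**5 + 1**5 + 5**5 + 1**5 = 1024 + 1 + 3125 + 1.
assert digits_fifth_powers_sum(4151) == 4151
assert digits_fifth_powers_sum(1234) != 1234  # most numbers fail the test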
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : List[str] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def __magic_name__ ( A : Optional[int], A : Optional[Any], A : Tuple=8 ): '''simple docstring''' a = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : int , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : DDPMScheduler , __lowerCamelCase : VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=__lowerCamelCase , scheduler=__lowerCamelCase , movq=__lowerCamelCase , ) a = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> Tuple: if latents is None: a = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase , dtype=__lowerCamelCase ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) a = latents.to(__lowerCamelCase ) a = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any]=0 ) -> List[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a = torch.device(f"""cuda:{gpu_id}""" ) a = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any=0 ) -> str: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) a = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__lowerCamelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a = None for cpu_offloaded_model in [self.unet, self.movq]: a , a = cpu_offload_with_hook(__lowerCamelCase , __lowerCamelCase , prev_module_hook=__lowerCamelCase ) # We'll offload the last model manually. a = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCAmelCase ( self : List[str] ) -> List[str]: if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__lowerCamelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__lowerCamelCase ) def __call__( self : Union[str, Any] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 5_12 , __lowerCamelCase : int = 1_00 , __lowerCamelCase : float = 4.0 , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , ) -> Union[str, Any]: a = self._execution_device a = guidance_scale > 1.0 if isinstance(__lowerCamelCase , __lowerCamelCase ): a = torch.cat(__lowerCamelCase , dim=0 ) a = image_embeds.shape[0] * num_images_per_prompt if isinstance(__lowerCamelCase , __lowerCamelCase ): a = torch.cat(__lowerCamelCase , dim=0 ) if do_classifier_free_guidance: a = image_embeds.repeat_interleave(__lowerCamelCase , dim=0 ) a = negative_image_embeds.repeat_interleave(__lowerCamelCase , dim=0 ) a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase , device=__lowerCamelCase ) a = self.scheduler.timesteps a = self.unet.config.in_channels a , a = downscale_height_and_width(__lowerCamelCase , __lowerCamelCase , self.movq_scale_factor ) # create initial latent a = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ): # expand the latents if we are doing classifier free guidance a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a = {"image_embeds": image_embeds} a = self.unet( sample=__lowerCamelCase , timestep=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , added_cond_kwargs=__lowerCamelCase , return_dict=__lowerCamelCase , )[0] if do_classifier_free_guidance: a , a = noise_pred.split(latents.shape[1] , dim=1 ) a , a = noise_pred.chunk(2 ) a , a = variance_pred.chunk(2 ) a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a = self.scheduler.step( 
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase , )[0] # post-processing a = self.movq.decode(__lowerCamelCase , force_not_quantize=__lowerCamelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: a = image * 0.5 + 0.5 a = image.clamp(0 , 1 ) a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase )
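# The classifier-free-guidance update from the denoising loop above, isolated on toy
# tensors: the unet runs once on a doubled batch and the halves are recombined as
# uncond + scale * (text - uncond). Shapes here are illustrative.
import torch

noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked on the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])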
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} a = parent a = batch_size a = num_channels a = min_resolution a = max_resolution a = do_resize a = size a = do_normalize a = image_mean a = image_std a = do_rescale a = rescale_factor a = do_pad def __UpperCAmelCase ( self : List[Any] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]: if not batched: a = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): a , a = image.size else: a , a = image.shape[1], image.shape[2] if w < h: a = int(self.size["shortest_edge"] * h / w ) a = self.size["shortest_edge"] elif w > h: a = self.size["shortest_edge"] a = int(self.size["shortest_edge"] * w / h ) else: a = self.size["shortest_edge"] a = self.size["shortest_edge"] else: a = [] for image in image_inputs: a , a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = DetaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ) -> int: pass def __UpperCAmelCase ( self : Any ) -> Any: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ) -> List[str]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: # prepare image and target a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" 
) as f: a = json.loads(f.read() ) a = {"image_id": 3_97_69, "annotations": target} # encode them a = DetaImageProcessor() a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) @slow def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: # prepare image, target and masks_path a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a = DetaImageProcessor(format="coco_panoptic" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify masks a = 82_28_73 
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
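# The aspect-ratio-preserving resize rule that get_expected_values reimplements, on
# toy numbers: the shorter image side is scaled to size["shortest_edge"] and the
# longer side follows proportionally.
shortest_edge = 18
w, h = 30, 400  # w < h, so the width becomes the shortest edge
expected_width = shortest_edge
expected_height = int(shortest_edge * h / w)
print(expected_width, expected_height)  # 18 240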
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by alternating forward and backward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
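# A quick demonstration of the bidirectional passes above:
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([-4, 0, 3, -2]))  # [-4, -2, 0, 3]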
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = PriorTransformer SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states""" @property def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: a = 4 a = 8 a = 7 a = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> Dict: torch.manual_seed(__lowerCamelCase ) a = 4 a = 8 a = 7 a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: return (4, 8) @property def __UpperCAmelCase ( self : str ) -> str: return (4, 8) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: a = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } a = self.dummy_input return init_dict, inputs_dict def __UpperCAmelCase ( self : List[Any] ) -> Any: a , a = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy" , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__lowerCamelCase ) a = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def __UpperCAmelCase ( self : Dict ) -> Dict: a , a = self.prepare_init_args_and_inputs_for_common() a = self.model_class(**__lowerCamelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2] , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[Any]: a = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) a = model.to(__lowerCamelCase ) if hasattr(__lowerCamelCase , "set_default_attn_processor" ): model.set_default_attn_processor() a = self.get_dummy_seed_input() with torch.no_grad(): a = model(**__lowerCamelCase )[0] a = output[0, :5].flatten().cpu() print(__lowerCamelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
a = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] ) self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-2 ) ) @slow class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : str , __lowerCamelCase : str=1 , __lowerCamelCase : Tuple=7_68 , __lowerCamelCase : List[Any]=77 , __lowerCamelCase : Optional[Any]=0 ) -> Optional[Any]: torch.manual_seed(__lowerCamelCase ) a = batch_size a = embedding_dim a = num_embeddings a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase ) a = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]], [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]], # fmt: on ] ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> Tuple: a = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" ) model.to(__lowerCamelCase ) a = self.get_dummy_seed_input(seed=__lowerCamelCase ) with torch.no_grad(): a = model(**__lowerCamelCase )[0] assert list(sample.shape ) == [1, 7_68] a = sample[0, :8].flatten().cpu() print(__lowerCamelCase ) a = torch.tensor(__lowerCamelCase ) assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base case: every vertex has been placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, meaning "not visited yet"
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
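# The backtracking search above on a small example: this 4-vertex cycle graph
# (0-1-2-3-0) has exactly one Hamiltonian cycle up to direction.
cycle_graph = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
print(hamilton_cycle(cycle_graph))  # [0, 1, 2, 3, 0]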
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
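# The identifier regex above pulls the first quoted string out of a mapping entry,
# which is the key the blocks are sorted by. A toy check:
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
entry = '        ("albert", "AlbertConfig"),'
print(_re_identifier.search(entry).groups()[0])  # albert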
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', }, 'tokenizer_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json', }, } __lowerCAmelCase : Union[str, Any] = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } __lowerCAmelCase : List[str] = '▁' # Segments (not really needed) __lowerCAmelCase : Any = 0 __lowerCAmelCase : Union[str, Any] = 1 __lowerCAmelCase : List[str] = 2 __lowerCAmelCase : Optional[Any] = 3 __lowerCAmelCase : List[str] = 4 class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Union[str, Any] = """left""" SCREAMING_SNAKE_CASE_ : Tuple = XLNetTokenizer def __init__( self : Optional[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Tuple="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : Any="<sep>" , __lowerCamelCase : Optional[int]="<pad>" , __lowerCamelCase : List[str]="<cls>" , __lowerCamelCase : Optional[int]="<mask>" , __lowerCamelCase : Union[str, Any]=["<eop>", "<eod>"] , **__lowerCamelCase : Any , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) a = 3 a = do_lower_case a = remove_space a = keep_accents a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
662
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = '▁' __lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'} __lowerCAmelCase : int = { 'vocab_file': { 'google/reformer-crime-and-punishment': ( 'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model' ) } } __lowerCAmelCase : Any = { 'google/reformer-crime-and-punishment': 52_4288, } class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None: a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def __UpperCAmelCase ( self : Optional[int] ) -> int: return self.sp_model.get_piece_size() def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]: a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Optional[Any]: a = self.__dict__.copy() a = None return state def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]: a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any: return self.sp_model.piece_to_id(__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str: if index < self.sp_model.get_piece_size(): a = self.sp_model.IdToPiece(__lowerCamelCase ) return token def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]: a = [] a = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token a = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and 
os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
662
1
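For readability, here is a minimal sketch (hypothetical token ids, not the library implementation) of the XLNet-style special-token layout built by `build_inputs_with_special_tokens` in the fast tokenizer above: unlike BERT, `<sep>` and `<cls>` go at the end of the sequence.

from typing import List, Optional

SEP_ID, CLS_ID = 4, 3  # made-up ids, purely for illustration

def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    sep, cls = [SEP_ID], [CLS_ID]
    if ids_b is None:
        return ids_a + sep + cls
    return ids_a + sep + ids_b + sep + cls

assert build_inputs([10, 11]) == [10, 11, 4, 3]
assert build_inputs([10], [20, 21]) == [10, 4, 20, 21, 4, 3]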
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __lowerCAmelCase : Optional[Any] = 'tiny-wmt19-en-ru' # Build # borrowed from a test __lowerCAmelCase : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __lowerCAmelCase : Dict = dict(zip(vocab, range(len(vocab)))) __lowerCAmelCase : str = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Any = Path(tmpdirname) __lowerCAmelCase : Any = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] __lowerCAmelCase : Tuple = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] __lowerCAmelCase : List[str] = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) __lowerCAmelCase : Union[str, Any] = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __lowerCAmelCase : Any = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __lowerCAmelCase : Optional[int] = FSMTForConditionalGeneration(config) print(F'''num of params {tiny_model.num_parameters()}''') # Test __lowerCAmelCase : Dict = tokenizer(['Making tiny model'], return_tensors='pt') __lowerCAmelCase : int = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
662
from __future__ import annotations import time import numpy as np __lowerCAmelCase : List[str] = [8, 5, 9, 7] __lowerCAmelCase : str = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __lowerCAmelCase : Optional[Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : """simple docstring""" def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None: a = claim_vector a = allocated_resources_table a = maximum_claim_table def __UpperCAmelCase ( self : List[str] ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self : str ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self : Dict ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]: return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None: a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack a = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def __UpperCAmelCase ( self : Any ) -> str: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
662
1
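The Banker's-algorithm class above keeps the dataset's renamed identifiers (its `__need_index_manager` method even references a name left undefined by the renaming), so here is a clean sketch of the safety check it implements, run on the same example data. This is an illustration, not a drop-in replacement:

import numpy as np

def is_safe(claim, allocated, maximum):
    # resources still free = total claim minus what is currently allocated
    available = np.array(claim) - np.array(allocated).sum(axis=0)
    need = np.array(maximum) - np.array(allocated)
    pending = list(range(len(allocated)))
    while pending:
        runnable = [p for p in pending if (need[p] <= available).all()]
        if not runnable:
            return False  # no process can finish: unsafe state
        p = runnable[0]
        available = available + np.array(allocated[p])  # p finishes, frees its resources
        pending.remove(p)
    return True

claim = [8, 5, 9, 7]
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
print(is_safe(claim, allocated, maximum))  # True for this data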
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = XLMTokenizer SCREAMING_SNAKE_CASE_ : Optional[Any] = False def __UpperCAmelCase ( self : List[str] ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["l o 123", "lo w 1456", "e r</w> 1789", ""] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(__lowerCamelCase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : int ) -> str: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : List[str] ) -> Tuple: a = XLMTokenizer(self.vocab_file , self.merges_file ) a = "lower" a = ["low", "er</w>"] a = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + ["<unk>"] a = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]: a = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
662
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowerCAmelCase : List[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset) def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) else: return _interleave_iterable_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ): '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A, info=A, split=A, axis=A ) else: return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
662
1
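The interleave/concatenate helpers above back the public `datasets` API; a short usage sketch (assumes the `datasets` library is installed; dataset contents are made up):

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

mixed = interleave_datasets(
    [d1, d2],
    probabilities=[0.5, 0.5],
    seed=42,
    stopping_strategy="first_exhausted",  # stop once one source is exhausted
)
print(mixed["text"])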
class snake_case__ : """simple docstring""" def __init__( self : List[Any] ) -> List[str]: a = "" a = "" a = [] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int ) -> int: if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: a = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: a = self.__min_dist_top_down_dp(__lowerCamelCase , n - 1 ) a = self.__min_dist_top_down_dp(m - 1 , __lowerCamelCase ) a = self.__min_dist_top_down_dp(m - 1 , n - 1 ) a = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return self.dp[m][n] def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : str ) -> int: a = worda a = worda a = [[-1 for _ in range(len(__lowerCamelCase ) )] for _ in range(len(__lowerCamelCase ) )] return self.__min_dist_top_down_dp(len(__lowerCamelCase ) - 1 , len(__lowerCamelCase ) - 1 ) def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : str ) -> int: a = worda a = worda a = len(__lowerCamelCase ) a = len(__lowerCamelCase ) a = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty a = j elif j == 0: # second string is empty a = i elif worda[i - 1] == worda[j - 1]: # last characters are equal a = self.dp[i - 1][j - 1] else: a = self.dp[i][j - 1] a = self.dp[i - 1][j] a = self.dp[i - 1][j - 1] a = 1 + min(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return self.dp[m][n] if __name__ == "__main__": __lowerCAmelCase : str = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() __lowerCAmelCase : Optional[Any] = input('Enter the first string: ').strip() __lowerCAmelCase : Optional[int] = input('Enter the second string: ').strip() print() print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''') print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''') print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
662
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __lowerCAmelCase : Any = '▁' class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : List[int] = [] def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
662
1
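Because the renaming in the EditDistance class above collapses both input words into a single `worda` attribute, the recurrence is easier to follow de-obfuscated; a minimal sketch of the same bottom-up DP:

def min_edit_distance(word1: str, word2: str) -> int:
    m, n = len(word1), len(word2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                dp[i][j] = j  # first word empty: insert j chars
            elif j == 0:
                dp[i][j] = i  # second word empty: delete i chars
            elif word1[i - 1] == word2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]  # last characters match
            else:
                dp[i][j] = 1 + min(
                    dp[i][j - 1],      # insert
                    dp[i - 1][j],      # delete
                    dp[i - 1][j - 1],  # replace
                )
    return dp[m][n]

assert min_edit_distance("intention", "execution") == 5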
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast SCREAMING_SNAKE_CASE_ : str = True def __UpperCAmelCase ( self : Optional[int] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: pass def __UpperCAmelCase ( self : int ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
662
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) a = parser.parse_args() return args def __magic_name__ ( A : List[str] ): '''simple docstring''' def fn(A : Tuple ): return tokenizer(examples["text"] ) return fn def __magic_name__ ( A : Any ): '''simple docstring''' a = [] for i in range(len(tokenized_data["input_ids"] ) ): a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } a = tf.train.Features(feature=A ) a = tf.train.Example(features=A ) a = example.SerializeToString() records.append(A ) return records def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: a = min(len(A ), args.limit ) a = dataset.select(range(A ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a = os.path.join(args.output_dir, args.split ) if not os.path.exists(A ): os.makedirs(A ) else: a = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. a = tokenize_function(A ) a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. 
The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : List[Any] ): # Concatenate all texts. a = {k: sum(examples[k], [] ) for k in examples.keys()} a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a = { k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )] for k, t in concatenated_examples.items() } return result a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 ) a = 0 a = 0 for shard in range(0, len(A ), args.shard_size ): a = grouped_dataset[shard : shard + args.shard_size] a = len(dataset_snapshot["input_ids"] ) a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) a = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): a = serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A, A ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""", "w" ) as f: print(F"""Total {args.split} records: {total_records}""", file=A ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = parse_args() main(args)
662
1
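In the TFRecord script above, the renamed `intaa_list`/`IntaaList` attributes stand in for the real protobuf fields `int64_list`/`Int64List`; a runnable sketch of serializing one tokenized example (made-up ids):

import tensorflow as tf

def to_tf_example(input_ids, attention_mask):
    feature = {
        "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)),
        "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=attention_mask)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

record = to_tf_example([101, 2023, 102], [1, 1, 1])
with tf.io.TFRecordWriter("example.tfrecord") as writer:
    writer.write(record)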
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Dict = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = ['LayoutLMv3FeatureExtractor'] __lowerCAmelCase : Any = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( A : List[str] ): '''simple docstring''' a = {} a = tokenizer(example["content"], truncation=A )["input_ids"] a = len(example["content"] ) / len(output["input_ids"] ) return output __lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) __lowerCAmelCase : str = parser.parse_args() if args.num_workers is None: __lowerCAmelCase : List[Any] = multiprocessing.cpu_count() __lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir) __lowerCAmelCase : List[Any] = time.time() __lowerCAmelCase : str = load_dataset(args.dataset_name, split='train') print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') __lowerCAmelCase : int = time.time() __lowerCAmelCase : Optional[int] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') __lowerCAmelCase : Tuple = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
662
1
from ..utils import DummyObject, requires_backends class snake_case__ (metaclass=_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = ["""torch""", """scipy"""] def __init__( self : Optional[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> str: requires_backends(self , ["torch", "scipy"] ) @classmethod def __UpperCAmelCase ( cls : Any , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[Any] ) -> Tuple: requires_backends(cls , ["torch", "scipy"] ) @classmethod def __UpperCAmelCase ( cls : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[int] ) -> List[Any]: requires_backends(cls , ["torch", "scipy"] )
662
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]: a = tokenizer a = dataset a = len(__lowerCamelCase ) if n_tasks is None else n_tasks a = n_copies def __iter__( self : Tuple ) -> str: a = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]: a = start_length a = eof_strings a = tokenizer def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]: a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCamelCase ) def __magic_name__ ( A : List[Any] ): '''simple docstring''' a = re.split("(%s)" % "|".join(A ), A ) # last string should be "" return "".join(string_list[:-2] ) def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ): '''simple docstring''' a = defaultdict(A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A ) ): with torch.no_grad(): a = batch["ids"].shape[-1] a = accelerator.unwrap_model(A ).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A ) # each task is generated batch_size times a = batch["task_id"].repeat(A ) a = accelerator.pad_across_processes( A, dim=1, pad_index=tokenizer.pad_token_id ) a , a = accelerator.gather((generated_tokens, generated_tasks) ) a = generated_tokens.cpu().numpy() a = generated_tasks.cpu().numpy() for task, generated_tokens in zip(A, A ): gen_token_dict[task].append(A ) a = [[] for _ in range(A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A ) code_gens[task].append(remove_last_block(A ) ) return code_gens def __magic_name__ ( ): '''simple docstring''' a = HfArgumentParser(A ) a = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing a = "false" if 
args.num_workers is None: a = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a = Accelerator() set_seed(args.seed, device_specific=A ) # Load model and tokenizer a = AutoTokenizer.from_pretrained(args.model_ckpt ) a = tokenizer.eos_token a = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ), } # Load evaluation dataset and metric a = load_dataset("openai_humaneval" ) a = load_metric("code_eval" ) a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) a = args.n_samples // args.batch_size a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A ) # do not confuse args.batch_size, which is actually the num_return_sequences a = DataLoader(A, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a = code_eval_metric.compute(references=[""], predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception a , a = accelerator.prepare(A, A ) a = complete_code( A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, ) if accelerator.is_main_process: a = [] for task in tqdm(range(A ) ): a = human_eval["test"][task]["test"] a = F"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric a , a = code_eval_metric.compute( references=A, predictions=A, num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, "w" ) as fp: json.dump(A, A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
662
1
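The `remove_last_block` helper in the HumanEval script above cuts a generation at the first stop prefix after the completed function body; a small illustration with a hypothetical sample string:

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(text: str) -> str:
    parts = re.split("(%s)" % "|".join(EOF_STRINGS), text)
    return "".join(parts[:-2])  # drop the stop token and everything after it

sample = "    return x + 1\nprint(add_one(3))"
print(repr(remove_last_block(sample)))  # '    return x + 1'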
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __lowerCAmelCase : Union[str, Any] = 2 class snake_case__ : """simple docstring""" def __init__( self : Any , *, # begin keyword-only arguments __lowerCamelCase : Dict="<s>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Any="<unk>" , __lowerCamelCase : str=None , ) -> List[str]: a , a , a , a = bos, unk, pad, eos a = [] a = [] a = {} a = self.add_symbol(__lowerCamelCase ) a = self.add_symbol(__lowerCamelCase ) a = self.add_symbol(__lowerCamelCase ) a = self.add_symbol(__lowerCamelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__lowerCamelCase ) a = len(self.symbols ) def __eq__( self : List[str] , __lowerCamelCase : int ) -> Optional[Any]: return self.indices == other.indices def __getitem__( self : Any , __lowerCamelCase : Optional[int] ) -> Dict: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : List[str] ) -> Any: return len(self.symbols ) def __contains__( self : List[Any] , __lowerCamelCase : List[Any] ) -> Optional[Any]: return sym in self.indices @classmethod def __UpperCAmelCase ( cls : Union[str, Any] , __lowerCamelCase : List[str] ) -> Tuple: a = cls() d.add_from_file(__lowerCamelCase ) return d def __UpperCAmelCase ( self : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : int=False ) -> int: if word in self.indices and not overwrite: a = self.indices[word] a = self.count[idx] + n return idx else: a = len(self.symbols ) a = idx self.symbols.append(__lowerCamelCase ) self.count.append(__lowerCamelCase ) return idx def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Any ) -> Dict: return 0 def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: if isinstance(__lowerCamelCase , __lowerCamelCase ): try: with open(__lowerCamelCase , "r" , encoding="utf-8" ) as fd: self.add_from_file(__lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(__lowerCamelCase ) ) return a = f.readlines() a = self._load_meta(__lowerCamelCase ) for line in lines[indices_start_line:]: try: a , a = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": a = True a , a = line.rsplit(" " , 1 ) else: a = False a = int(__lowerCamelCase ) a = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(__lowerCamelCase ) ) self.add_symbol(__lowerCamelCase , n=__lowerCamelCase , overwrite=__lowerCamelCase ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def __magic_name__ ( A : Optional[Any] ): '''simple docstring''' a = dict((re.sub(R"@@$", "", A ), v) if k.endswith("@@" ) else (re.sub(R"$", "</w>", A ), v) for k, v in d.items() ) a = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] a = d[k] # restore return da def __magic_name__ ( A : Optional[Any], A : Any ): '''simple docstring''' if not os.path.exists(A ): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(A, exist_ok=A ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models a = os.path.join(A, "checkpoint.pt" ) if not os.path.isfile(A ): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" ) a = torch.load(A, map_location="cpu" ) a = chkpt["cfg"]["model"] # dicts a = os.path.join(A, "dict.txt" ) if not os.path.isfile(A ): raise ValueError(F"""path to the file {dict_file} does not exist!""" ) a = Dictionary.load(A ) a = rewrite_dict_keys(src_dict.indices ) a = len(A ) a = os.path.join(A, VOCAB_FILES_NAMES["vocab_file"] ) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(A, "w", encoding="utf-8" ) as f: f.write(json.dumps(A, ensure_ascii=A, indent=A ) ) # merges_file (bpecodes) a = os.path.join(A, "bpecodes" ) if not os.path.isfile(A ): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" ) a = os.path.join(A, VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(A, A ) # model config a = os.path.join(A, "config.json" ) a = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""" ) with open(A, "w", encoding="utf-8" ) as f: f.write(json.dumps(A, ensure_ascii=A, indent=A ) ) # tokenizer config a = os.path.join(A, A ) a = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""" ) with open(A, "w", encoding="utf-8" ) as f: f.write(json.dumps(A, ensure_ascii=A, indent=A ) ) # model a = chkpt["model"] # remove unneeded keys a = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(A, A ) a = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): a = model_state_dict.pop(A ) else: a = model_state_dict.pop(A ) a = 
BioGptConfig.from_pretrained(A ) a = BioGptForCausalLM(A ) # check that it loads ok model_new.load_state_dict(A ) # save a = os.path.join(A, A ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(A, A ) print("Conversion is done!" ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCAmelCase : Tuple = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # RoCBert does not ship a fast tokenizer

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # RoCBert does not ship a fast tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import re


TRANSFORMERS_PATH = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(TRANSFORMERS_PATH, f) for f in os.listdir(TRANSFORMERS_PATH) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
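# An illustration of the effect (a hypothetical mapping, not taken from the
# repo): given a file containing
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
#
# sort_auto_mapping rewrites the entry block sorted by the quoted identifier:
#
#             ("albert", "AlbertModel"),
#             ("bert", "BertModel"),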
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast SCREAMING_SNAKE_CASE_ : str = True def __UpperCAmelCase ( self : Optional[int] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: pass def __UpperCAmelCase ( self : int ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    """Render a JSON benchmark report as a collapsible Markdown section."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
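# A sketch of the input this script expects (schema inferred from the code
# above, so treat the file and field names as assumptions):
#
#   {
#     "benchmarks/benchmark_map_blocks.json": {
#       "load_time": {"new": 1.23, "old": 1.50, "diff": -0.27},
#       "peak_memory": {"new": 512.0}
#     }
#   }
#
# Invoked as `python <this_script>.py results.json report.md`, it emits one
# Markdown table per benchmark inside a collapsible <details> block.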
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation tower
    base ↑↑ height (Project Euler problem 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
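# A minimal alternative sketch (not part of the original solution): Python's
# built-in three-argument pow(base, exp, mod) performs the same modular
# exponentiation natively, so the recursive helper can be dropped entirely.
def solution_with_builtin_pow(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)  # (base ** result) % 10**digits
    return result


# Sanity check against the recursive version defined above:
# solution_with_builtin_pow() == solution()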
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> Dict: a = tempfile.mkdtemp() a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) a = { "do_resize": True, "size": {"height": 2_24, "width": 2_24}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], "do_convert_rgb": True, } a = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : int ) -> List[str]: a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase ) a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , 
__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) a = self.get_image_processor(do_normalize=__lowerCamelCase ) a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = self.prepare_image_inputs() a = image_processor(__lowerCamelCase , return_tensors="np" ) a = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = processor(text=__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : List[Any] ) -> Any: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__lowerCamelCase ) a = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> List[str]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal timestep embeddings of shape [N, embedding_dim]
    for a 1-D array of N (possibly fractional) timestep indices."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step embedding module: learns a two-layer MLP projection of the
    input time step embeddings.

    Args:
        time_embed_dim (`int`, *optional*, defaults to `32`): time step embedding dimension.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): parameter dtype.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.floataa

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""
    Wrapper module producing sinusoidal time step embeddings.

    Args:
        dim (`int`, *optional*, defaults to `32`): time step embedding dimension.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
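# A minimal usage sketch (shapes are illustrative, not taken from the module
# above): embed a batch of four timesteps, then project them with the learned
# MLP head.
import jax
import jax.numpy as jnp

timesteps = jnp.arange(4, dtype=jnp.float32)                  # (batch,)
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)  # (4, 32)

mlp = FlaxTimestepEmbedding(time_embed_dim=32)
params = mlp.init(jax.random.PRNGKey(0), emb)
projected = mlp.apply(params, emb)                            # (4, 32)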
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = """ssube/stable-diffusion-x4-upscaler-onnx""" def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Any=0 ) -> Tuple: a = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__lowerCamelCase ) ) a = torch.manual_seed(__lowerCamelCase ) a = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs() a = pipe(**__lowerCamelCase ).images a = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self : Union[str, Any] ) -> int: a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs() a = pipe(**__lowerCamelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self : Dict ) -> List[Any]: a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs() a = pipe(**__lowerCamelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self : Optional[int] ) -> str: a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs() a = pipe(**__lowerCamelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = self.get_dummy_inputs() a = pipe(**__lowerCamelCase ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class snake_case__ (unittest.TestCase ): """simple docstring""" @property def __UpperCAmelCase ( self : str ) -> Optional[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self : Dict ) -> List[Any]: a = ort.SessionOptions() a = False return options def __UpperCAmelCase ( self : int ) -> str: a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default a = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = "A fantasy landscape, trending on artstation" a = torch.manual_seed(0 ) a = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCamelCase , output_type="np" , ) a = output.images a = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) a = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a = init_image.resize((1_28, 1_28) ) a = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) a = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = "A fantasy landscape, trending on artstation" a = torch.manual_seed(0 ) a = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCamelCase , output_type="np" , ) a = output.images a = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) a = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : str=3 , __lowerCamelCase : str=18 , __lowerCamelCase : Any=30 , __lowerCamelCase : Optional[Any]=4_00 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : int=True , ) -> List[str]: a = parent a = batch_size a = num_channels a = image_size a = min_resolution a = max_resolution a = do_resize a = size_divisor a = do_rescale def __UpperCAmelCase ( self : List[str] ) -> List[Any]: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = GLPNImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: a = GLPNImageProcessingTester(self ) @property def __UpperCAmelCase ( self : int ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Any ) -> List[Any]: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) ) self.assertTrue(hasattr(__lowerCamelCase , "resample" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> str: pass def __UpperCAmelCase ( self : Any ) -> Dict: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __UpperCAmelCase ( self : Optional[Any] ) -> Any: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple: a = file_names a = image_transform a = label_to_id def __len__( self : Any ) -> Tuple: return len(self.file_names ) def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int: a = self.file_names[idx] a = PIL.Image.open(__lowerCamelCase ) a = raw_image.convert("RGB" ) if self.image_transform is not None: a = self.image_transform(__lowerCamelCase ) a = extract_label(__lowerCamelCase ) if self.label_to_id is not None: a = self.label_to_id[label] return {"image": image, "label": label} def __magic_name__ ( A : str, A : int ): '''simple docstring''' if args.with_tracking: a = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) a = config["image_size"] if not isinstance(A, (list, tuple) ): a = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit" ): if args.checkpointing_steps == "epoch": a = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): a = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: a = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: a = os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A, A ) # Grab all the image filenames a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences a = [extract_label(A ) for fname in file_names] a = list(set(A ) ) id_to_label.sort() a = {lbl: i for i, lbl in enumerate(A )} # Set the seed before splitting the data. np.random.seed(A ) torch.manual_seed(A ) torch.cuda.manual_seed_all(A ) # Split our filenames between train and validation a = np.random.permutation(len(A ) ) a = int(0.8 * len(A ) ) a = random_perm[:cut] a = random_perm[cut:] # For training we use a simple RandomResizedCrop a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] ) a = PetsDataset( [file_names[i] for i in train_split], image_transform=A, label_to_id=A ) # For evaluation, we use a deterministic Resize a = Compose([Resize(A ), ToTensor()] ) a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A ) # Instantiate dataloaders. 
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = create_model("resnet50d", pretrained=A, num_classes=len(A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): a = False for param in model.get_classifier().parameters(): a = True # We normalize the batches of images to be a bit faster. a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 ) # Instantiate learning rate scheduler a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( A, A, A, A, A ) # We need to keep track of how many total steps we have iterated over a = 0 # We also need to keep track of the starting epoch so files are named properly a = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) a = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` a = os.path.splitext(A )[0] if "epoch" in training_difference: a = int(training_difference.replace("epoch_", "" ) ) + 1 a = None else: a = int(training_difference.replace("step_", "" ) ) a = resume_step // len(A ) resume_step -= starting_epoch * len(A ) # Now we train the model for epoch in range(A, A ): model.train() if args.with_tracking: a = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step a = accelerator.skip_first_batches(A, A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader a = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std a = model(A ) a = torch.nn.functional.cross_entropy(A, batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A, A ): a = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) model.eval() a = 0 a = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std with torch.no_grad(): a = model(A ) a = outputs.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["label"]) ) a = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() a = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(A ), "epoch": epoch, }, step=A, ) if checkpointing_steps == "epoch": a = F"""epoch_{epoch}""" if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) if args.with_tracking: accelerator.end_training() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir", required=A, help="The data folder on disk." ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) a = parser.parse_args() a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(A, A ) if __name__ == "__main__": main()
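# A typical launch for the training script above (a sketch: the script name
# and data layout are assumptions, with pet images unpacked under ./images):
#
#   accelerate launch cv_example.py --data_dir ./images --with_tracking \
#       --checkpointing_steps epoch --output_dir ./checkpoints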
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3] for index in range(len(bin_string)) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
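# A one-line cross-check (a sketch, not part of the original module): the
# standard library reaches the same result via int() and oct().
def bin_to_octal_builtin(bin_string: str) -> str:
    return oct(int(bin_string, 2))[2:]  # strip the "0o" prefix


assert bin_to_octal_builtin("1111") == "17" == bin_to_octal("1111")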
662
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split() __lowerCAmelCase : Dict = '|'.join(sys.argv[1:]) __lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''') __lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
662
1
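The binary-to-octal converter above works by left-padding the bit string to a multiple of three and mapping each 3-bit group to one octal digit. A standalone sketch of the same idea (bin_to_octal is an illustrative name), cross-checked against Python's built-in oct():

def bin_to_octal(bin_string: str) -> str:
    if not bin_string or any(c not in "01" for c in bin_string):
        raise ValueError("expected a non-empty binary string")
    while len(bin_string) % 3 != 0:      # left-pad to a multiple of 3 bits
        bin_string = "0" + bin_string
    groups = [bin_string[i : i + 3] for i in range(0, len(bin_string), 3)]
    return "".join(str(int(group, 2)) for group in groups)

assert bin_to_octal("1111") == oct(0b1111)[2:] == "17"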
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : List[Any] = logging.get_logger(__name__) def __magic_name__ ( A : str, A : str ): '''simple docstring''' a = RobertaPreLayerNormConfig.from_pretrained( A, architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict a = torch.load(hf_hub_download(repo_id=A, filename="pytorch_model.bin" ) ) a = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): a = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue a = tensor_value a = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=A, config=A, state_dict=A ) model.save_pretrained(A ) # convert tokenizer a = AutoTokenizer.from_pretrained(A ) tokenizer.save_pretrained(A ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCAmelCase : str = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
662
def __magic_name__ ( A : int, A : int, A : int ): '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: a = _modexpt(A, exponent // 2, A ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(A, exponent - 1, A )) % modulo_value def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ): '''simple docstring''' a = base for _ in range(1, A ): a = _modexpt(A, A, 10**digits ) return result if __name__ == "__main__": print(F'''{solution() = }''')
662
1
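The _modexpt helper above is recursive exponentiation by squaring under a modulus, used to keep only the last few digits of the hyperexponentiation tower. An iterative sketch of the same technique (modexp is an illustrative name), cross-checked against Python's built-in three-argument pow():

def modexp(base: int, exponent: int, modulo: int) -> int:
    result = 1
    base %= modulo
    while exponent > 0:
        if exponent % 2 == 1:            # odd exponent: fold one factor in
            result = result * base % modulo
        base = base * base % modulo      # square the base for the next bit
        exponent //= 2
    return result

assert modexp(1777, 1855, 10**8) == pow(1777, 1855, 10**8)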
from datetime import datetime import requests from bs4 import BeautifulSoup if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = input('Enter image url: ').strip() print(F'''Downloading image from {url} ...''') __lowerCAmelCase : str = BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image __lowerCAmelCase : List[Any] = soup.find('meta', {'property': 'og:image'})['content'] __lowerCAmelCase : Dict = requests.get(image_url).content __lowerCAmelCase : int = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg''' with open(file_name, 'wb') as fp: fp.write(image_data) print(F'''Done. Image saved to disk as {file_name}.''')
662
def jaro_winkler ( stra : str, strb : str ): '''simple docstring''' def get_matched_characters(_stra : str, _strb : str ) -> str: matched = [] limit = min(len(_stra ), len(_strb ) ) // 2 for i, l in enumerate(_stra ): left = int(max(0, i - limit ) ) right = int(min(i + limit + 1, len(_strb ) ) ) if l in _strb[left:right]: matched.append(l ) _strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}""" return "".join(matched ) # matching characters matching_a = get_matched_characters(stra, strb ) matching_b = get_matched_characters(strb, stra ) match_count = len(matching_a ) # transposition transpositions = ( len([(ca, cb) for ca, cb in zip(matching_a, matching_b ) if ca != cb] ) // 2 ) if not match_count: jaro = 0.0 else: jaro = ( 1 / 3 * ( match_count / len(stra ) + match_count / len(strb ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters prefix_len = 0 for ca, cb in zip(stra[:4], strb[:4] ): if ca == cb: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler('hello', 'world'))
662
1
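The last line of the Jaro-Winkler sample above applies the Winkler prefix bonus: the plain Jaro score is boosted in proportion to the common prefix, capped at four characters. A worked example of just that adjustment (the 0.4666... Jaro value for "hello"/"world" follows from one matched character in each direction; the second pair is hypothetical):

jaro = 0.4666666666666666   # jaro("hello", "world"): match_count = 1
prefix_len = 0              # "hello" and "world" share no prefix
print(jaro + 0.1 * prefix_len * (1 - jaro))   # unchanged: 0.4666...

jaro = 0.9                  # hypothetical pair sharing a 3-character prefix
prefix_len = 3
print(jaro + 0.1 * prefix_len * (1 - jaro))   # boosted to 0.93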
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __magic_name__ ( A : Sequence[float], A : int, A : int ): '''simple docstring''' if not arr: return None, None, 0 if low == high: return low, high, arr[low] a = (low + high) // 2 a , a , a = max_subarray(A, A, A ) a , a , a = max_subarray(A, mid + 1, A ) a , a , a = max_cross_sum(A, A, A, A ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __magic_name__ ( A : Sequence[float], A : int, A : int, A : int ): '''simple docstring''' a , a = float("-inf" ), -1 a , a = float("-inf" ), -1 a = 0 for i in range(A, low - 1, -1 ): summ += arr[i] if summ > left_sum: a = summ a = i a = 0 for i in range(mid + 1, high + 1 ): summ += arr[i] if summ > right_sum: a = summ a = i return max_left, max_right, (left_sum + right_sum) def __magic_name__ ( A : int ): '''simple docstring''' a = [randint(1, A ) for _ in range(A )] a = time.time() max_subarray(A, 0, input_size - 1 ) a = time.time() return end - start def __magic_name__ ( ): '''simple docstring''' a = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] a = [time_max_subarray(A ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A, A ): print(A, "\t\t", A ) plt.plot(A, A ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
662
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)} def __magic_name__ ( A : int ): '''simple docstring''' return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def __magic_name__ ( ): '''simple docstring''' return sum( number for number in range(1000, 1000000 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
662
1
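The Project Euler sample above checks each candidate against the sum of the fifth powers of its digits; 4150 is one such number and makes a quick sanity check:

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
n = 4150
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150
assert sum(DIGITS_FIFTH_POWER[d] for d in str(n)) == n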
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCAmelCase : Any = logging.get_logger(__name__) class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""pixel_values"""] def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : float = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_55 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : List[Any] , ) -> None: super().__init__(**__lowerCamelCase ) a = size if size is not None else {"shortest_edge": 3_84} a = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) a = do_resize a = size # Default value set here for backwards compatibility where the value in config is None a = crop_pct if crop_pct is not None else 2_24 / 2_56 a = resample a = do_rescale a = rescale_factor a = do_normalize a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCAmelCase ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : float , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Union[str, Any] , ) -> np.ndarray: a = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""" ) a = size["shortest_edge"] if shortest_edge < 3_84: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct a = int(shortest_edge / crop_pct ) a = get_resize_output_image_size(__lowerCamelCase , size=__lowerCamelCase , default_to_square=__lowerCamelCase ) a = resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__lowerCamelCase , size=(shortest_edge, shortest_edge) , data_format=__lowerCamelCase , **__lowerCamelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __lowerCamelCase , size=(shortest_edge, shortest_edge) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ) -> Optional[Any]: return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ) -> np.ndarray: return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : float = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : int , ) -> PIL.Image.Image: a = do_resize if do_resize is not None else self.do_resize a = crop_pct if crop_pct is not None else self.crop_pct a = resample if resample is not None else self.resample a = do_rescale if do_rescale is not None else self.do_rescale a = rescale_factor if rescale_factor is not None else self.rescale_factor a = do_normalize if do_normalize is not None else self.do_normalize a = image_mean if image_mean is not None else self.image_mean a = image_std if image_std is not None else self.image_std a = size if size is not None else self.size a = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) a = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. a = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: a = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , crop_pct=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_rescale: a = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: a = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] a = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] a = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
662
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} a = parent a = batch_size a = num_channels a = min_resolution a = max_resolution a = do_resize a = size a = do_normalize a = image_mean a = image_std a = do_rescale a = rescale_factor a = do_pad def __UpperCAmelCase ( self : List[Any] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]: if not batched: a = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): a , a = image.size else: a , a = image.shape[1], image.shape[2] if w < h: a = int(self.size["shortest_edge"] * h / w ) a = self.size["shortest_edge"] elif w > h: a = self.size["shortest_edge"] a = int(self.size["shortest_edge"] * w / h ) else: a = self.size["shortest_edge"] a = self.size["shortest_edge"] else: a = [] for image in image_inputs: a , a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = DetaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ) -> int: pass def __UpperCAmelCase ( self : Any ) -> Any: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ) -> List[str]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: # prepare image and target a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" 
) as f: a = json.loads(f.read() ) a = {"image_id": 3_97_69, "annotations": target} # encode them a = DetaImageProcessor() a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) @slow def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: # prepare image, target and masks_path a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a = DetaImageProcessor(format="coco_panoptic" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify masks a = 82_28_73 
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
662
1
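In the image processor above, inputs smaller than 384 on the shortest edge are first resized so that edge becomes shortest_edge / crop_pct, then center-cropped back to shortest_edge. Worked arithmetic with the backwards-compatibility default crop_pct of 224/256 (input size chosen for illustration):

shortest_edge = 224
crop_pct = 224 / 256
resize_shorter = int(shortest_edge / crop_pct)   # 256
print(resize_shorter)
# the short side is resized to 256, then a (224, 224) center crop is taken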
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class snake_case__ : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int]=1_00 , __lowerCamelCase : int=13 , __lowerCamelCase : str=30 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=3 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Any=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Dict=10 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=[0, 1, 2, 3] , ) -> List[str]: a = parent a = 1_00 a = batch_size a = image_size a = patch_size a = num_channels a = is_training a = use_labels a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = type_sequence_label_size a = initializer_range a = scope a = out_indices a = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a = (image_size // patch_size) ** 2 a = num_patches + 1 def __UpperCAmelCase ( self : List[str] ) -> int: a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels, pixel_labels def __UpperCAmelCase ( self : Dict ) -> Dict: return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ) -> List[Any]: a = BeitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Union[str, Any]: a = BeitForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ) -> List[Any]: a = self.type_sequence_label_size a = BeitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a = 1 a = BeitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) -> List[Any]: a = self.num_labels a = BeitForSemanticSegmentation(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : int = ( { """feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: a = BeitModelTester(self ) a = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: pass def __UpperCAmelCase ( self : Tuple ) -> Dict: a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = 
model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def __UpperCAmelCase ( self : int ) -> Dict: a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCamelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> List[str]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> List[str]: if not self.model_tester.is_training: return a , a = self.model_tester.prepare_config_and_inputs_for_common() a = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling]: continue a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) a = model(**__lowerCamelCase ).loss loss.backward() def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: a , a = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a = False a = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) a = model(**__lowerCamelCase ).loss loss.backward() def __UpperCAmelCase ( self : List[Any] ) -> List[str]: a , a = self.model_tester.prepare_config_and_inputs_for_common() a = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: a = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = BeitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __magic_name__ ( ): '''simple docstring''' a = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" @cached_property def __UpperCAmelCase ( self : Dict ) -> int: return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __UpperCAmelCase ( self : Any ) -> Dict: a = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__lowerCamelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).pixel_values.to(__lowerCamelCase ) # prepare bool_masked_pos a = torch.ones((1, 1_96) , dtype=torch.bool ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(pixel_values=__lowerCamelCase , bool_masked_pos=__lowerCamelCase ) a = outputs.logits # verify the logits a = torch.Size((1, 1_96, 81_92) ) self.assertEqual(logits.shape , __lowerCamelCase ) a = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowerCamelCase , atol=1e-2 ) ) @slow def __UpperCAmelCase ( self : int ) -> List[Any]: a = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__lowerCamelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) a = outputs.logits # verify the logits a = torch.Size((1, 10_00) ) self.assertEqual(logits.shape , __lowerCamelCase ) a = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) a = 2_81 self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : str ) -> Optional[int]: a = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( __lowerCamelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) a = outputs.logits # verify the logits a = torch.Size((1, 2_18_41) ) self.assertEqual(logits.shape , __lowerCamelCase ) a = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) a = 23_96 self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase ) @slow def __UpperCAmelCase ( self : Any ) -> Optional[Any]: a = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a = model.to(__lowerCamelCase ) a = BeitImageProcessor(do_resize=__lowerCamelCase , size=6_40 , do_center_crop=__lowerCamelCase ) a = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a = Image.open(ds[0]["file"] ) a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) a = outputs.logits # verify the logits a = torch.Size((1, 1_50, 1_60, 1_60) ) self.assertEqual(logits.shape , __lowerCamelCase ) a = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, 
-2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] , device=__lowerCamelCase , ) else: a = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] , device=__lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: a = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a = model.to(__lowerCamelCase ) a = BeitImageProcessor(do_resize=__lowerCamelCase , size=6_40 , do_center_crop=__lowerCamelCase ) a = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a = Image.open(ds[0]["file"] ) a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) a = outputs.logits.detach().cpu() a = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase , target_sizes=[(5_00, 3_00)] ) a = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , __lowerCamelCase ) a = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase ) a = torch.Size((1_60, 1_60) ) self.assertEqual(segmentation[0].shape , __lowerCamelCase )
662
def __magic_name__ ( A : list ): '''simple docstring''' for i in range(len(A ) - 1, 0, -1 ): a = False for j in range(A, 0, -1 ): if unsorted[j] < unsorted[j - 1]: a , a = unsorted[j - 1], unsorted[j] a = True for j in range(A ): if unsorted[j] > unsorted[j + 1]: a , a = unsorted[j + 1], unsorted[j] a = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip() __lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(',')] print(F'''{cocktail_shaker_sort(unsorted) = }''')
662
1
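The cocktail shaker sort above alternates a backward and a forward bubble pass per iteration and stops early once a full iteration makes no swap. A standalone restatement of the same passes with descriptive names, cross-checked against sorted():

def cocktail_shaker_sort(seq: list) -> list:   # illustrative restatement
    for i in range(len(seq) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):              # backward pass
            if seq[j] < seq[j - 1]:
                seq[j - 1], seq[j] = seq[j], seq[j - 1]
                swapped = True
        for j in range(i):                     # forward pass
            if seq[j] > seq[j + 1]:
                seq[j + 1], seq[j] = seq[j], seq[j + 1]
                swapped = True
        if not swapped:
            break
    return seq

data = [4, 5, 2, 1, 2]
assert cocktail_shaker_sort(data[:]) == sorted(data)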
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : bool = False SCREAMING_SNAKE_CASE_ : float = 3.0 class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : str ) -> Tuple: # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=__lowerCamelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def __UpperCAmelCase ( self : str ) -> Optional[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. a = GradScalerKwargs(init_scale=10_24 , growth_factor=2 ) AcceleratorState._reset_state() a = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) a = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 20_00 ) self.assertEqual(scaler._enabled , __lowerCamelCase ) @require_multi_gpu def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __lowerCAmelCase : Any = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) __lowerCAmelCase : str = Accelerator(kwargs_handlers=[ddp_scaler]) __lowerCAmelCase : Dict = torch.nn.Linear(100, 200) __lowerCAmelCase : Optional[int] = accelerator.prepare(model) # Check the values changed in kwargs __lowerCAmelCase : Optional[Any] = '' __lowerCAmelCase : List[Any] = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
662
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase ) }
662
1
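The metric above is a thin wrapper over nltk's corpus-level GLEU. A minimal direct call (assumes nltk is installed; the tokenized sentences are illustrative):

from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
print(round(score, 2))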
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class snake_case__ : """simple docstring""" def __init__( self : List[Any] , __lowerCamelCase : Optional[Any] , ) -> Optional[Any]: a = parent a = 13 a = 7 a = True a = True a = False a = True a = 99 a = 32 a = 2 a = 4 a = 37 a = "gelu" a = 0.1 a = 0.1 a = 5_12 a = 16 a = 2 a = 0.02 a = 3 a = 4 a = None def __UpperCAmelCase ( self : int ) -> Any: a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ) -> Any: a = TFDistilBertModel(config=__lowerCamelCase ) a = {"input_ids": input_ids, "attention_mask": input_mask} a = model(__lowerCamelCase ) a = [input_ids, input_mask] a = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ) -> Any: a = TFDistilBertForMaskedLM(config=__lowerCamelCase ) a = {"input_ids": input_ids, "attention_mask": input_mask} a = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple ) -> Optional[Any]: a = TFDistilBertForQuestionAnswering(config=__lowerCamelCase ) a = { "input_ids": input_ids, "attention_mask": input_mask, } a = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self : int , 
__lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Tuple: a = self.num_labels a = TFDistilBertForSequenceClassification(__lowerCamelCase ) a = {"input_ids": input_ids, "attention_mask": input_mask} a = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Dict: a = self.num_choices a = TFDistilBertForMultipleChoice(__lowerCamelCase ) a = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) a = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) a = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } a = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> Dict: a = self.num_labels a = TFDistilBertForTokenClassification(__lowerCamelCase ) a = {"input_ids": input_ids, "attention_mask": input_mask} a = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: a = self.prepare_config_and_inputs() ((a) , (a) , (a) , (a) , (a) , (a)) = config_and_inputs a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) SCREAMING_SNAKE_CASE_ : Tuple = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : List[str] = False def __UpperCAmelCase ( self : Optional[int] ) -> Any: a = TFDistilBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCamelCase , dim=37 ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Any ) -> Any: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: a = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> List[str]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> int: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCamelCase ) @slow def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): a = TFDistilBertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_tf class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : Optional[int] ) -> int: a = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) a = tf.constant([[0, 1, 2, 3, 4, 5]] ) a = model(__lowerCamelCase )[0] a = [1, 6, 7_68] self.assertEqual(output.shape , __lowerCamelCase ) a = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
662
import argparse
import os
import re


__lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto'

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def __magic_name__ ( A : int, A : bool = False ):
    '''simple docstring'''
    with open(A, "r", encoding="utf-8" ) as f:
        a = f.read()
    a = content.split("\n" )
    a = []
    a = 0
    while line_idx < len(A ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            a = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    a = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(A, "w", encoding="utf-8" ) as f:
            f.write("\n".join(A ) )
    elif "\n".join(A ) != content:
        return True


def __magic_name__ ( A : bool = False ):
    '''simple docstring'''
    a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )]
    a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames]

    if not overwrite and any(A ):
        a = [f for f, d in zip(A, A ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix"""
            " this." )


if __name__ == "__main__":
    __lowerCAmelCase : Dict = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    __lowerCAmelCase : Optional[Any] = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
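The sorter above keys multi-line mapping entries on the first quoted identifier inside each block. Below is a minimal, self-contained sketch of that idea; the sample blocks are made up for illustration and only the regex is taken from the code above.

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

blocks = [
    '        ("roberta", "RobertaConfig"),',
    '        ("albert", "AlbertConfig"),',
    '        ("bert", "BertConfig"),',
]
# order blocks by the identifier captured inside each one
blocks = sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
print("\n".join(blocks))  # albert, bert, roberta order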
662
1
def __magic_name__ ( A : int ):
    '''simple docstring'''
    a = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def __magic_name__ ( A : int = 100 ):
    '''simple docstring'''
    a = 1
    a = 2

    for i in range(2, max_n + 1 ):
        a = pre_numerator
        a = 2 * i // 3 if i % 3 == 0 else 1
        a = cur_numerator
        a = e_cont * pre_numerator + temp

    return sum_digits(A )


if __name__ == "__main__":
    print(F'''{solution() = }''')
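The recurrence above walks the continued-fraction expansion of e. As a cross-check, here is a small sketch that rebuilds the tenth convergent with fractions.Fraction (the term formula is restated from the code; the expected values 1457/536 and digit sum 17 are the well-known ones for this problem):

from fractions import Fraction

def e_convergent(n):
    # continued-fraction terms of e: [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]
    terms = [2] + [2 * (j + 1) // 3 if (j + 1) % 3 == 0 else 1 for j in range(1, n)]
    # evaluate the continued fraction from the innermost term outwards
    value = Fraction(terms[-1])
    for t in reversed(terms[:-1]):
        value = t + 1 / value
    return value

tenth = e_convergent(10)
assert tenth == Fraction(1457, 536)
assert sum(int(d) for d in str(tenth.numerator)) == 17
print(tenth)  # 1457/536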
662
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__lowerCAmelCase : int = logging.get_logger(__name__)

__lowerCAmelCase : Optional[int] = '▁'

__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}

__lowerCAmelCase : int = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

__lowerCAmelCase : Any = {
    'google/reformer-crime-and-punishment': 52_4288,
}


class snake_case__ (_UpperCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None:
        a = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
        a = vocab_file
        a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__lowerCamelCase )

    @property
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        return self.sp_model.get_piece_size()

    def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
        a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Optional[Any] ) -> Optional[Any]:
        a = self.__dict__.copy()
        a = None
        return state

    def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]:
        a = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            a = {}

        a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]:
        return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )

    def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any:
        return self.sp_model.piece_to_id(__lowerCamelCase )

    def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
        if index < self.sp_model.get_piece_size():
            a = self.sp_model.IdToPiece(__lowerCamelCase )
        return token

    def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
        a = []
        a = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(__lowerCamelCase ) + token
                a = []
            else:
                current_sub_tokens.append(__lowerCamelCase )
        out_string += self.sp_model.decode(__lowerCamelCase )
        return out_string.strip()

    def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        a = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowerCamelCase , "wb" ) as fi:
                a = self.sp_model.serialized_model_proto()
                fi.write(__lowerCamelCase )

        return (out_vocab_file,)
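The decode loop in the tokenizer above buffers ordinary sentencepiece sub-tokens and flushes the buffer whenever a special token appears, so special tokens pass through verbatim. A minimal sketch of that pattern, with a stub standing in for the real SentencePiece decoder (the SPECIAL set and stub_decode are assumptions for the demo):

SPECIAL = {"</s>", "<unk>"}

def stub_decode(pieces):
    # stand-in for sp_model.decode: join pieces and turn '▁' markers into spaces
    return "".join(pieces).replace("▁", " ")

def convert_tokens_to_string(tokens):
    current, out = [], ""
    for token in tokens:
        if token in SPECIAL:
            # flush buffered pieces through the decoder, keep the special token as-is
            out += stub_decode(current) + token
            current = []
        else:
            current.append(token)
    out += stub_decode(current)
    return out.strip()

print(convert_tokens_to_string(["▁Hello", "▁world", "</s>"]))  # 'Hello world</s>'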
662
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


__lowerCAmelCase : int = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : List[str] = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
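The init module above defers heavy imports until a symbol is actually requested. A minimal sketch of the same lazy-loading idea using PEP 562 module-level __getattr__ instead of a _LazyModule class; the package layout ("mylib/__init__.py", the "heavy" submodule, and "expensive_function") is hypothetical:

# mylib/__init__.py -- lazy submodule loading, assuming mylib/heavy.py exists
import importlib

_import_structure = {"heavy": ["expensive_function"]}

def __getattr__(name):
    # look the requested symbol up in the declared structure and import on demand
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With this in place, `import mylib` is cheap, and `mylib.expensive_function` triggers the real import only on first access.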
662
from __future__ import annotations

import time

import numpy as np


__lowerCAmelCase : List[str] = [8, 5, 9, 7]
__lowerCAmelCase : str = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
__lowerCAmelCase : Optional[Any] = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class snake_case__ :
    """simple docstring"""

    def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None:
        a = claim_vector
        a = allocated_resources_table
        a = maximum_claim_table

    def __UpperCAmelCase ( self : List[str] ) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __UpperCAmelCase ( self : str ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __UpperCAmelCase ( self : Dict ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]:
        return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}

    def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None:
        a = self.__need()
        a = self.__allocated_resources_table
        a = self.__available_resources()
        a = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            a = False
            for each_need in need_list:
                a = True
                for index, need in enumerate(__lowerCamelCase ):
                    if need > available_resources[index]:
                        a = False
                        break
                if execution:
                    a = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            a = original_need_index
                    print(f"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(__lowerCamelCase )
                    # update available/freed resources stack
                    a = np.array(__lowerCamelCase ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break

    def __UpperCAmelCase ( self : Any ) -> str:
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
        time.sleep(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
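The class above implements the Banker's algorithm. Here is a compact sketch of the core safety check in the same spirit: a state is safe if some execution order lets every process finish; the function and the demo call reuse the vectors from the sample, but the implementation is a simplified assumption, not the class's method.

import numpy as np

def is_safe(claim, allocated, maximum):
    need = np.array(maximum) - np.array(allocated)
    available = np.array(claim) - np.array(allocated).sum(axis=0)
    pending = list(range(len(need)))
    while pending:
        runnable = [p for p in pending if (need[p] <= available).all()]
        if not runnable:
            return False  # nothing can finish with what is free: unsafe
        p = runnable[0]
        available += allocated[p]  # process p finishes and frees its resources
        pending.remove(p)
    return True

print(is_safe([8, 5, 9, 7],
              [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
              [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]))  # True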
662
1
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


__lowerCAmelCase : Optional[Any] = 3


def __magic_name__ ( A : int ):
    '''simple docstring'''
    print("Generating primitive root of p" )
    while True:
        a = random.randrange(3, A )
        if pow(A, 2, A ) == 1:
            continue
        if pow(A, A, A ) == 1:
            continue
        return g


def __magic_name__ ( A : int ):
    '''simple docstring'''
    print("Generating prime p..." )
    a = rabin_miller.generate_large_prime(A )  # select large prime number.
    a = primitive_root(A )  # one primitive root on modulo p.
    a = random.randrange(3, A )  # private_key -> have to be greater than 2 for safety.
    a = cryptomath.find_mod_inverse(pow(A, A, A ), A )

    a = (key_size, e_a, e_a, p)
    a = (key_size, d)

    return public_key, private_key


def __magic_name__ ( A : str, A : int ):
    '''simple docstring'''
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print("\nWARNING:" )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()

    a , a = generate_key(A )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""", "w" ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )

    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""", "w" ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )


def __magic_name__ ( ):
    '''simple docstring'''
    print("Making key files..." )
    make_key_files("elgamal", 2048 )
    print("Key files generation successful" )


if __name__ == "__main__":
    main()
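To make the algebra behind the key generator above concrete, here is a toy end-to-end ElGamal round trip over a small prime. The parameters (p, g, the message m) are illustrative assumptions only and far too small for real use; the modular-inverse call needs Python 3.8+.

import random

p = 2_147_483_647          # 2**31 - 1, a Mersenne prime; demo-sized only
g = 7                      # assumed base for the demo
d = random.randrange(3, p)  # private key
e2 = pow(g, d, p)           # public component

m = 123_456                 # message encoded as an integer < p
k = random.randrange(3, p)  # per-message ephemeral key
c1, c2 = pow(g, k, p), (m * pow(e2, k, p)) % p

# decrypt: m = c2 * (c1^d)^-1 mod p, since c2 = m * g^(d*k) and c1^d = g^(k*d)
recovered = (c2 * pow(pow(c1, d, p), -1, p)) % p
assert recovered == m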
662
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


__lowerCAmelCase : List[Any] = logging.get_logger(__name__)

__lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset)


def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(A ):
        if not isinstance(A, (Dataset, IterableDataset) ):
            if isinstance(A, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(A )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
        if i == 0:
            a , a = (
                (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
            )
        elif not isinstance(A, A ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            A, A, A, info=A, split=A, stopping_strategy=A )
    else:
        return _interleave_iterable_datasets(
            A, A, A, info=A, split=A, stopping_strategy=A )


def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ):
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets." )
    for i, dataset in enumerate(A ):
        if not isinstance(A, (Dataset, IterableDataset) ):
            if isinstance(A, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(A )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
        if i == 0:
            a , a = (
                (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset)
            )
        elif not isinstance(A, A ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(A, info=A, split=A, axis=A )
    else:
        return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
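The two stopping strategies dispatched above are easiest to see on plain Python sequences. The sketch below is a simplified assumption of their behavior (round robin over non-empty sequences; probability weights and Dataset objects omitted): "first_exhausted" stops at the shortest input, "all_exhausted" restarts shorter inputs until every one has been seen in full.

def interleave(seqs, stopping_strategy="first_exhausted"):
    out, pos, seen_end = [], [0] * len(seqs), [False] * len(seqs)
    while not all(seen_end):
        for i, seq in enumerate(seqs):
            if pos[i] == len(seq):
                seen_end[i] = True
                if stopping_strategy == "first_exhausted" or all(seen_end):
                    return out
                pos[i] = 0  # restart the exhausted sequence (oversampling)
            out.append(seq[pos[i]])
            pos[i] += 1
    return out

print(interleave([[1, 2, 3], ["a", "b"]]))                   # [1, 'a', 2, 'b', 3]
print(interleave([[1, 2, 3], ["a", "b"]], "all_exhausted"))  # [1, 'a', 2, 'b', 3, 'a']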
662
1
from __future__ import annotations

import math


def __magic_name__ ( A : int ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(A ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


__lowerCAmelCase : Any = [num for num in range(3, 10_0001, 2) if not is_prime(num)]


def __magic_name__ ( A : int ):
    '''simple docstring'''
    if not isinstance(A, A ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    a = []
    for num in range(len(A ) ):
        a = 0
        while 2 * i * i <= odd_composites[num]:
            a = odd_composites[num] - 2 * i * i
            if is_prime(A ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(A ) == n:
                return list_nums
    return []


def __magic_name__ ( ):
    '''simple docstring'''
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F'''{solution() = }''')
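The logic above searches for an odd composite that is not a prime plus twice a square. A minimal standalone restatement of that test, with its own trial-division primality check (the printed counterexample 5777 is the well-known first one for this conjecture):

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def violates_conjecture(n):
    # True if odd composite n cannot be written as prime + 2*k^2 for any k >= 1
    k = 1
    while 2 * k * k < n:
        if is_prime(n - 2 * k * k):
            return False
        k += 1
    return True

odd_composites = (n for n in range(9, 10_000, 2) if not is_prime(n))
print(next(n for n in odd_composites if violates_conjecture(n)))  # 5777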
662
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __lowerCAmelCase : Any = '▁' class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : List[int] = [] def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
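The special-token assembly methods in the tokenizer above reduce to simple list arithmetic. A sketch of the [CLS] ... [SEP] pattern with made-up token ids (101/102 are conventional BERT-style ids, used here only for illustration):

CLS, SEP = 101, 102

def build_inputs_with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

def token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] + first sequence + [SEP]; segment 1 the rest
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)

print(build_inputs_with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]
print(token_type_ids([7, 8], [9]))                    # [0, 0, 0, 0, 1, 1]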
662
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Optional[int] = { 'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = ['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ 'BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = ['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) a = parser.parse_args() return args def __magic_name__ ( A : List[str] ): '''simple docstring''' def fn(A : Tuple ): return tokenizer(examples["text"] ) return fn def __magic_name__ ( A : Any ): '''simple docstring''' a = [] for i in range(len(tokenized_data["input_ids"] ) ): a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } a = tf.train.Features(feature=A ) a = tf.train.Example(features=A ) a = example.SerializeToString() records.append(A ) return records def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: a = min(len(A ), args.limit ) a = dataset.select(range(A ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a = os.path.join(args.output_dir, args.split ) if not os.path.exists(A ): os.makedirs(A ) else: a = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. a = tokenize_function(A ) a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. 
The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : List[Any] ): # Concatenate all texts. a = {k: sum(examples[k], [] ) for k in examples.keys()} a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a = { k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )] for k, t in concatenated_examples.items() } return result a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 ) a = 0 a = 0 for shard in range(0, len(A ), args.shard_size ): a = grouped_dataset[shard : shard + args.shard_size] a = len(dataset_snapshot["input_ids"] ) a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) a = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): a = serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A, A ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""", "w" ) as f: print(F"""Total {args.split} records: {total_records}""", file=A ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = parse_args() main(args)
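The group_texts step in the script above is the part that does the reshaping: concatenate every tokenized field, drop the remainder, and cut fixed-size blocks. Reduced to plain Python so the behavior is visible (max_length=8 is chosen small for the demo):

def group_texts(examples, max_length=8):
    # concatenate all lists per key, then split into fixed-length chunks
    concatenated = {k: sum(examples[k], []) for k in examples}
    total = (len(next(iter(concatenated.values()))) // max_length) * max_length
    return {
        k: [v[i : i + max_length] for i in range(0, total, max_length)]
        for k, v in concatenated.items()
    }

batch = {"input_ids": [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]}
print(group_texts(batch))  # {'input_ids': [[1, 2, 3, 4, 5, 6, 7, 8]]} -- remainder dropped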
662
1
from collections import deque class snake_case__ : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int ) -> None: a = process_name # process name a = arrival_time # arrival time of the process # completion time of finished process or last interrupted time a = arrival_time a = burst_time # remaining burst time a = 0 # total time of the process wait in ready queue a = 0 # time from arrival time to completion time class snake_case__ : """simple docstring""" def __init__( self : int , __lowerCamelCase : int , __lowerCamelCase : list[int] , __lowerCamelCase : deque[Process] , __lowerCamelCase : int , ) -> None: # total number of mlfq's queues a = number_of_queues # time slice of queues that round robin algorithm applied a = time_slices # unfinished process is in this ready_queue a = queue # current time a = current_time # finished process is in this sequence queue a = deque() def __UpperCAmelCase ( self : Optional[Any] ) -> list[str]: a = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __UpperCAmelCase ( self : int , __lowerCamelCase : list[Process] ) -> list[int]: a = [] for i in range(len(__lowerCamelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : list[Process] ) -> list[int]: a = [] for i in range(len(__lowerCamelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : list[Process] ) -> list[int]: a = [] for i in range(len(__lowerCamelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : deque[Process] ) -> deque[Process]: a = deque() # sequence deque of finished process while len(__lowerCamelCase ) != 0: a = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCamelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 a = 0 # set the process's turnaround time because it is finished a = self.current_time - cp.arrival_time # set the completion time a = self.current_time # add the process to queue that has finished queue finished.append(__lowerCamelCase ) self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __UpperCAmelCase ( self : Dict , __lowerCamelCase : deque[Process] , __lowerCamelCase : int ) -> tuple[deque[Process], deque[Process]]: a = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCamelCase ) ): a = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished 
processes self.update_waiting_time(__lowerCamelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time a = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCamelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished a = 0 # set the finish time a = self.current_time # update the process' turnaround time because it is finished a = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCamelCase ) self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __UpperCAmelCase ( self : List[Any] ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): a , a = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest __lowerCAmelCase : List[str] = Process('P1', 0, 53) __lowerCAmelCase : int = Process('P2', 0, 17) __lowerCAmelCase : Tuple = Process('P3', 0, 68) __lowerCAmelCase : Dict = Process('P4', 0, 24) __lowerCAmelCase : Tuple = 3 __lowerCAmelCase : List[str] = [17, 25] __lowerCAmelCase : List[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) __lowerCAmelCase : Any = Process('P1', 0, 53) __lowerCAmelCase : Tuple = Process('P2', 0, 17) __lowerCAmelCase : List[Any] = Process('P3', 0, 68) __lowerCAmelCase : Dict = Process('P4', 0, 24) __lowerCAmelCase : Optional[int] = 3 __lowerCAmelCase : Union[str, Any] = [17, 25] __lowerCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa]) __lowerCAmelCase : int = MLFQ(number_of_queues, time_slices, queue, 0) __lowerCAmelCase : Optional[Any] = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
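The scheduler above threads arrival, waiting, and turnaround bookkeeping through each queue level. Stripped to burst times only, the control flow is a few lines: each level is round robin with its time slice, leftover work demotes to the next level, and the final level runs first-come-first-served. This condensed sketch is an assumption-laden reduction, not the class itself; on the sample workload it reproduces the finish order P2, P4, P1, P3.

from collections import deque

def mlfq(bursts, time_slices):
    queue = deque(bursts.items())
    finished = []
    for ts in time_slices:
        for _ in range(len(queue)):
            name, remaining = queue.popleft()
            if remaining <= ts:
                finished.append(name)                  # done within this level's slice
            else:
                queue.append((name, remaining - ts))   # demote the leftover work
    finished.extend(name for name, _ in queue)         # last level: FCFS order
    return finished

print(mlfq({"P1": 53, "P2": 17, "P3": 68, "P4": 24}, [17, 25]))
# ['P2', 'P4', 'P1', 'P3']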
662
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser


def __magic_name__ ( A : List[str] ):
    '''simple docstring'''
    a = {}
    a = tokenizer(example["content"], truncation=A )["input_ids"]
    a = len(example["content"] ) / len(output["input_ids"] )
    return output


__lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
    __lowerCAmelCase : List[Any] = multiprocessing.cpu_count()
__lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)

__lowerCAmelCase : List[Any] = time.time()
__lowerCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

__lowerCAmelCase : int = time.time()
__lowerCAmelCase : Optional[int] = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

__lowerCAmelCase : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
662
1
def __magic_name__ ( A : int = 1000 ):
    '''simple docstring'''
    a , a = 1, 1
    a = []
    for i in range(1, n + 1 ):
        a = prev_numerator + 2 * prev_denominator
        a = prev_numerator + prev_denominator
        if len(str(A ) ) > len(str(A ) ):
            result.append(A )
        a = numerator
        a = denominator

    return len(A )


if __name__ == "__main__":
    print(F'''{solution() = }''')
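The recurrence above (n' = n + 2d, d' = n + d) tracks the successive expansions of the continued fraction for sqrt(2). A small cross-check with fractions.Fraction, where each next expansion is 1 + 1/(1 + x); after eight steps it reaches 1393/985, the first expansion whose numerator has more digits than its denominator:

from fractions import Fraction

n, d = 1, 1
x = Fraction(3, 2)  # first expansion of sqrt(2): 1 + 1/2
for _ in range(8):
    n, d = n + 2 * d, n + d
    assert Fraction(n, d) == x   # the integer recurrence tracks the expansions exactly
    x = 1 + 1 / (1 + x)          # descend one level deeper into the continued fraction
print(n, d)  # 1393 985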
662
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]: a = tokenizer a = dataset a = len(__lowerCamelCase ) if n_tasks is None else n_tasks a = n_copies def __iter__( self : Tuple ) -> str: a = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]: a = start_length a = eof_strings a = tokenizer def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]: a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCamelCase ) def __magic_name__ ( A : List[Any] ): '''simple docstring''' a = re.split("(%s)" % "|".join(A ), A ) # last string should be "" return "".join(string_list[:-2] ) def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ): '''simple docstring''' a = defaultdict(A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A ) ): with torch.no_grad(): a = batch["ids"].shape[-1] a = accelerator.unwrap_model(A ).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A ) # each task is generated batch_size times a = batch["task_id"].repeat(A ) a = accelerator.pad_across_processes( A, dim=1, pad_index=tokenizer.pad_token_id ) a , a = accelerator.gather((generated_tokens, generated_tasks) ) a = generated_tokens.cpu().numpy() a = generated_tasks.cpu().numpy() for task, generated_tokens in zip(A, A ): gen_token_dict[task].append(A ) a = [[] for _ in range(A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A ) code_gens[task].append(remove_last_block(A ) ) return code_gens def __magic_name__ ( ): '''simple docstring''' a = HfArgumentParser(A ) a = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing a = "false" if 
args.num_workers is None: a = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a = Accelerator() set_seed(args.seed, device_specific=A ) # Load model and tokenizer a = AutoTokenizer.from_pretrained(args.model_ckpt ) a = tokenizer.eos_token a = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ), } # Load evaluation dataset and metric a = load_dataset("openai_humaneval" ) a = load_metric("code_eval" ) a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) a = args.n_samples // args.batch_size a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A ) # do not confuse args.batch_size, which is actually the num_return_sequences a = DataLoader(A, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a = code_eval_metric.compute(references=[""], predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception a , a = accelerator.prepare(A, A ) a = complete_code( A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, ) if accelerator.is_main_process: a = [] for task in tqdm(range(A ) ): a = human_eval["test"][task]["test"] a = F"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric a , a = code_eval_metric.compute( references=A, predictions=A, num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, "w" ) as fp: json.dump(A, A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
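The remove_last_block helper above trims a generated completion at the last end-of-function marker. Shown in isolation with the same stop strings (the sample completion is made up):

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    # split on any stop marker, keeping the markers, then drop the final
    # marker and whatever followed it
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "    return a + b\nprint(add(1, 2))"
print(repr(remove_last_block(completion)))  # '    return a + b'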
662
1
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class snake_case__ (_UpperCamelCase , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : Any = TransfoXLTokenizer
    SCREAMING_SNAKE_CASE_ : List[Any] = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False

    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        super().setUp()
        a = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Any ) -> Union[str, Any]:
        a = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple ) -> List[Any]:
        a = "<unk> UNwanted , running"
        a = "<unk> unwanted, running"
        return input_text, output_text

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        a = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCamelCase )
        a = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(__lowerCamelCase , ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [0, 4, 8, 7] )

    def __UpperCAmelCase ( self : Dict ) -> int:
        a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )

    def __UpperCAmelCase ( self : Dict ) -> Dict:
        a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        a = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        a = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        a = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCamelCase ) , __lowerCamelCase )

    def __UpperCAmelCase ( self : str ) -> Dict:
        a = self.get_tokenizer()
        a = len(__lowerCamelCase )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowerCamelCase ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , "new1" )
662
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__lowerCAmelCase : Any = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
1
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)


class snake_case__ (_UpperCamelCase ):
    """simple docstring"""

    def __init__( self : List[str] , *__lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , __lowerCamelCase , )
        super().__init__(*__lowerCamelCase , **__lowerCamelCase )
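The sample above is the standard deprecation-alias pattern: the old class subclasses its replacement and only adds a warning. The same pattern in isolation, with hypothetical class names standing in for the real ones:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    # deprecated alias: warn once on construction, otherwise behave identically
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed in a future release. "
            "Please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=256)  # emits a FutureWarning, then works as NewProcessor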
662
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast SCREAMING_SNAKE_CASE_ : str = True def __UpperCAmelCase ( self : Optional[int] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: pass def __UpperCAmelCase ( self : int ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
662
1
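The toy vocab/merges fixture in the longformer tokenizer test above encodes byte-level BPE, where "\u0120" marks a leading space. A rough greedy sketch, not the real ranked-merge BPE, but enough to reproduce the fixture's expected pieces for "lower newer":
# Greedy application of the fixture's merge rules; real BPE re-ranks candidate pairs
# by merge priority on every round, but on this tiny example the result is the same.
merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]

def bpe(word):
    symbols = list(word)
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols

# "lower newer" -> ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"],
# matching the test's expected tokens.
print(bpe("lower") + bpe("\u0120newer"))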
class snake_case__ : """simple docstring""" def __init__( self : str , __lowerCamelCase : Optional[Any] ) -> int: # split the comma-separated input string into a list a = arr.split("," ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: a = [int(self.array[0] )] * len(self.array ) a = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): a = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) a = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __lowerCAmelCase : str = input('please input some numbers:') __lowerCAmelCase : Union[str, Any] = SubArray(whole_array) __lowerCAmelCase : str = array.solve_sub_array() print('the result is:', re)
662
from typing import TYPE_CHECKING from ....utils import _LazyModule __lowerCAmelCase : int = {'tokenization_tapex': ['TapexTokenizer']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
662
1
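The recurrence in the SubArray entry above -- sum_value[i] = max(arr[i] + sum_value[i-1], arr[i]), with rear[i] carrying the running best -- is Kadane's maximum-contiguous-subarray-sum algorithm. A minimal sketch without the comma-parsing wrapper:
def max_subarray_sum(arr):
    # best_ending_here plays the role of sum_value[i]; best_overall of rear[i].
    best_ending_here = best_overall = arr[0]
    for x in arr[1:]:
        best_ending_here = max(best_ending_here + x, x)
        best_overall = max(best_overall, best_ending_here)
    return best_overall

print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from the slice [4, -1, 2, 1]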
import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency __lowerCAmelCase : Optional[int] = { 'E': 1_2.7_0, 'T': 9.0_6, 'A': 8.1_7, 'O': 7.5_1, 'I': 6.9_7, 'N': 6.7_5, 'S': 6.3_3, 'H': 6.0_9, 'R': 5.9_9, 'D': 4.2_5, 'L': 4.0_3, 'C': 2.7_8, 'U': 2.7_6, 'M': 2.4_1, 'W': 2.3_6, 'F': 2.2_3, 'G': 2.0_2, 'Y': 1.9_7, 'P': 1.9_3, 'B': 1.2_9, 'V': 0.9_8, 'K': 0.7_7, 'J': 0.1_5, 'X': 0.1_5, 'Q': 0.1_0, 'Z': 0.0_7, } __lowerCAmelCase : Tuple = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' __lowerCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __magic_name__ ( A : str ): '''simple docstring''' a = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __magic_name__ ( A : tuple ): '''simple docstring''' return x[0] def __magic_name__ ( A : str ): '''simple docstring''' a = get_letter_count(A ) a = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(A ) a = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=A ) a = "".join(freq_to_letter[freq] ) a = list(freq_to_letter_str.items() ) freq_pairs.sort(key=A, reverse=A ) a = [freq_pair[1] for freq_pair in freq_pairs] return "".join(A ) def __magic_name__ ( A : str ): '''simple docstring''' a = get_frequency_order(A ) a = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
662
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Dict = { 'configuration_blip': [ 'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlipConfig', 'BlipTextConfig', 'BlipVisionConfig', ], 'processing_blip': ['BlipProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = [ 'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys __lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
1
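The frequency-analysis entry above scores how English-like a text is: +1 for each of the six most and six least frequent English letters (per the ETAOIN order) that also land in the text's own top or bottom six. A simplified self-contained sketch; the tie-breaking differs from the sample's reverse-ETAOIN rule, so scores can differ slightly on short texts:
from collections import Counter

ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

def match_score(message):
    counts = Counter(ch for ch in message.upper() if ch.isalpha())
    # Order all 26 letters by their frequency in the message, most common first.
    order = sorted(ETAOIN, key=lambda ch: -counts[ch])
    score = sum(ch in order[:6] for ch in ETAOIN[:6])     # common letters that match
    score += sum(ch in order[-6:] for ch in ETAOIN[-6:])  # rare letters that match
    return score  # 0..12; higher means more English-like

print(match_score("The quick brown fox jumps over the lazy dog, again and again."))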
from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup __lowerCAmelCase : List[str] = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l=' def __magic_name__ ( A : str = "mumbai" ): '''simple docstring''' a = BeautifulSoup(requests.get(url + location ).content, "html.parser" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"} ): a = job.find("a", attrs={"data-tn-element": "jobTitle"} ).text.strip() a = job.find("span", {"class": "company"} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs('Bangalore'), 1): print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
662
import math import flax.linen as nn import jax.numpy as jnp def __magic_name__ ( A : jnp.ndarray, A : int, A : float = 1, A : float = 1, A : float = 1.0E4, A : bool = False, A : float = 1.0, ): '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" a = float(embedding_dim // 2 ) a = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) a = min_timescale * jnp.exp(jnp.arange(A, dtype=jnp.floataa ) * -log_timescale_increment ) a = jnp.expand_dims(A, 1 ) * jnp.expand_dims(A, 0 ) # scale embeddings a = scale * emb if flip_sin_to_cos: a = jnp.concatenate([jnp.cos(A ), jnp.sin(A )], axis=1 ) else: a = jnp.concatenate([jnp.sin(A ), jnp.cos(A )], axis=1 ) a = jnp.reshape(A, [jnp.shape(A )[0], embedding_dim] ) return signal class snake_case__ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 32 SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> List[Any]: a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__lowerCamelCase ) a = nn.silu(__lowerCamelCase ) a = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__lowerCamelCase ) return temb class snake_case__ (nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 32 SCREAMING_SNAKE_CASE_ : bool = False SCREAMING_SNAKE_CASE_ : float = 1 @nn.compact def __call__( self : Tuple , __lowerCamelCase : int ) -> Union[str, Any]: return get_sinusoidal_embeddings( __lowerCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
662
1
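The flax entry above computes classic sinusoidal timestep embeddings. A NumPy sketch of the same math under the defaults shown (min_timescale=1, freq_shift=1, no sin/cos flip, scale=1), just to make the shapes concrete:
import numpy as np

def sinusoidal_embedding(timesteps, dim, max_timescale=1e4):
    half = dim // 2
    # Geometric frequency ladder: exp(arange(half) * -log(max/min) / (half - 1)).
    freqs = np.exp(np.arange(half) * -(np.log(max_timescale) / (half - 1)))
    args = timesteps[:, None].astype(np.float32) * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)  # (batch, dim)

emb = sinusoidal_embedding(np.array([0, 10, 100]), dim=32)
print(emb.shape)  # (3, 32)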
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : torch.FloatTensor class snake_case__ (_UpperCamelCase , _UpperCamelCase ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __lowerCamelCase : int = 6_55_36 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 0 , __lowerCamelCase : str = "fourier" , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __lowerCamelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __lowerCamelCase : Tuple[str] = "UNetMidBlock1D" , __lowerCamelCase : str = None , __lowerCamelCase : Tuple[int] = (32, 32, 64) , __lowerCamelCase : str = None , __lowerCamelCase : int = 8 , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = False , ) -> List[str]: super().__init__() a = sample_size # time if time_embedding_type == "fourier": a = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=__lowerCamelCase , log=__lowerCamelCase , flip_sin_to_cos=__lowerCamelCase ) a = 2 * block_out_channels[0] elif time_embedding_type == "positional": a = Timesteps( block_out_channels[0] , flip_sin_to_cos=__lowerCamelCase , downscale_freq_shift=__lowerCamelCase ) a = block_out_channels[0] if use_timestep_embedding: a = block_out_channels[0] * 4 a = TimestepEmbedding( in_channels=__lowerCamelCase , time_embed_dim=__lowerCamelCase , act_fn=__lowerCamelCase , out_dim=block_out_channels[0] , ) a = nn.ModuleList([] ) a = None a = nn.ModuleList([] ) a = None # down a = in_channels for i, down_block_type in enumerate(__lowerCamelCase ): a = output_channel a = block_out_channels[i] if i == 0: input_channel += extra_in_channels a = i == len(__lowerCamelCase ) - 1 a = get_down_block( __lowerCamelCase , num_layers=__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(__lowerCamelCase ) # mid a = get_mid_block( __lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__lowerCamelCase , add_downsample=__lowerCamelCase , ) # up a = list(reversed(__lowerCamelCase ) ) a = reversed_block_out_channels[0] if out_block_type is None: a = out_channels else: a = block_out_channels[0] for i, up_block_type in enumerate(__lowerCamelCase ): a = output_channel a = ( reversed_block_out_channels[i + 1] if i < len(__lowerCamelCase ) - 1 else final_upsample_channels ) a = i == len(__lowerCamelCase ) - 1 a = get_up_block( __lowerCamelCase , num_layers=__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(__lowerCamelCase ) a = output_channel # out a = norm_num_groups if norm_num_groups is not None else 
min(block_out_channels[0] // 4 , 32 ) a = get_out_block( out_block_type=__lowerCamelCase , num_groups_out=__lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=__lowerCamelCase , act_fn=__lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Union[torch.Tensor, float, int] , __lowerCamelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]: a = timestep if not torch.is_tensor(__lowerCamelCase ): a = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(__lowerCamelCase ) and len(timesteps.shape ) == 0: a = timesteps[None].to(sample.device ) a = self.time_proj(__lowerCamelCase ) if self.config.use_timestep_embedding: a = self.time_mlp(__lowerCamelCase ) else: a = timestep_embed[..., None] a = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) a = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down a = () for downsample_block in self.down_blocks: a , a = downsample_block(hidden_states=__lowerCamelCase , temb=__lowerCamelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: a = self.mid_block(__lowerCamelCase , __lowerCamelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): a = down_block_res_samples[-1:] a = down_block_res_samples[:-1] a = upsample_block(__lowerCamelCase , res_hidden_states_tuple=__lowerCamelCase , temb=__lowerCamelCase ) # 5. post-process if self.out_block: a = self.out_block(__lowerCamelCase , __lowerCamelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=__lowerCamelCase )
662
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> Dict: a = tempfile.mkdtemp() a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) a = { "do_resize": True, "size": {"height": 2_24, "width": 2_24}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], "do_convert_rgb": True, } a = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : int ) -> List[str]: a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase ) a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , 
__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) a = self.get_image_processor(do_normalize=__lowerCamelCase ) a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = self.prepare_image_inputs() a = image_processor(__lowerCamelCase , return_tensors="np" ) a = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = processor(text=__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : List[Any] ) -> Any: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__lowerCamelCase ) a = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> List[str]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
662
1
def __magic_name__ ( A : str, A : str ): '''simple docstring''' def get_matched_characters(A : str, A : str ) -> str: a = [] a = min(len(_stra ), len(_stra ) ) // 2 for i, l in enumerate(_stra ): a = int(max(0, i - limit ) ) a = int(min(i + limit + 1, len(_stra ) ) ) if l in _stra[left:right]: matched.append(A ) a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}""" return "".join(A ) # matching characters a = get_matched_characters(A, A ) a = get_matched_characters(A, A ) a = len(A ) # transposition a = ( len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2 ) if not match_count: a = 0.0 else: a = ( 1 / 3 * ( match_count / len(A ) + match_count / len(A ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters a = 0 for ca, ca in zip(stra[:4], stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler('hello', 'world'))
662
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple: a = file_names a = image_transform a = label_to_id def __len__( self : Any ) -> Tuple: return len(self.file_names ) def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int: a = self.file_names[idx] a = PIL.Image.open(__lowerCamelCase ) a = raw_image.convert("RGB" ) if self.image_transform is not None: a = self.image_transform(__lowerCamelCase ) a = extract_label(__lowerCamelCase ) if self.label_to_id is not None: a = self.label_to_id[label] return {"image": image, "label": label} def __magic_name__ ( A : str, A : int ): '''simple docstring''' if args.with_tracking: a = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) a = config["image_size"] if not isinstance(A, (list, tuple) ): a = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit" ): if args.checkpointing_steps == "epoch": a = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): a = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: a = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: a = os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A, A ) # Grab all the image filenames a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences a = [extract_label(A ) for fname in file_names] a = list(set(A ) ) id_to_label.sort() a = {lbl: i for i, lbl in enumerate(A )} # Set the seed before splitting the data. np.random.seed(A ) torch.manual_seed(A ) torch.cuda.manual_seed_all(A ) # Split our filenames between train and validation a = np.random.permutation(len(A ) ) a = int(0.8 * len(A ) ) a = random_perm[:cut] a = random_perm[cut:] # For training we use a simple RandomResizedCrop a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] ) a = PetsDataset( [file_names[i] for i in train_split], image_transform=A, label_to_id=A ) # For evaluation, we use a deterministic Resize a = Compose([Resize(A ), ToTensor()] ) a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A ) # Instantiate dataloaders. 
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = create_model("resnet50d", pretrained=A, num_classes=len(A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): a = False for param in model.get_classifier().parameters(): a = True # We normalize the batches of images to be a bit faster. a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 ) # Instantiate learning rate scheduler a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( A, A, A, A, A ) # We need to keep track of how many total steps we have iterated over a = 0 # We also need to keep track of the starting epoch so files are named properly a = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) a = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` a = os.path.splitext(A )[0] if "epoch" in training_difference: a = int(training_difference.replace("epoch_", "" ) ) + 1 a = None else: a = int(training_difference.replace("step_", "" ) ) a = resume_step // len(A ) resume_step -= starting_epoch * len(A ) # Now we train the model for epoch in range(A, A ): model.train() if args.with_tracking: a = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step a = accelerator.skip_first_batches(A, A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader a = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std a = model(A ) a = torch.nn.functional.cross_entropy(A, batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A, A ): a = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) model.eval() a = 0 a = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std with torch.no_grad(): a = model(A ) a = outputs.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["label"]) ) a = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() a = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(A ), "epoch": epoch, }, step=A, ) if checkpointing_steps == "epoch": a = F"""epoch_{epoch}""" if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) if args.with_tracking: accelerator.end_training() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir", required=A, help="The data folder on disk." ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) a = parser.parse_args() a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(A, A ) if __name__ == "__main__": main()
662
1
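For the pair printed in the jaro_winkler entry's __main__ block, the score can be checked by hand: within the match window of min(5, 5) // 2 = 2, only 'l' matches between "hello" and "world", so m = 1 with 0 transpositions and no common prefix.
# Hand recomputation of jaro_winkler("hello", "world"):
m, t, len_a, len_b = 1, 0, 5, 5
jaro = (m / len_a + m / len_b + (m - t) / m) / 3  # (0.2 + 0.2 + 1.0) / 3
prefix = 0                                        # no shared leading characters
print(jaro + 0.1 * prefix * (1 - jaro))           # 0.4666...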
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
662
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __lowerCAmelCase : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __lowerCAmelCase : Tuple = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split() __lowerCAmelCase : Dict = '|'.join(sys.argv[1:]) __lowerCAmelCase : List[Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''') __lowerCAmelCase : List[Any] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
662
1
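The get_modified_files script above folds the requested top-level dirs into one alternation regex and keeps only matching .py paths. A standalone sketch of that filter on hypothetical file names:
import re

modified_files = ["src/a.py", "docs/readme.md", "tests/test_x.py", "setup.py"]
joined_dirs = "|".join(["src", "tests"])  # mirrors '|'.join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
print(" ".join(f for f in modified_files if regex.match(f)))  # src/a.py tests/test_x.py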
def __magic_name__ ( A : int, A : int ): '''simple docstring''' if b == 0: return 1 if (b % 2) == 0: return actual_power(A, int(b / 2 ) ) * actual_power(A, int(b / 2 ) ) else: return a * actual_power(A, int(b / 2 ) ) * actual_power(A, int(b / 2 ) ) def __magic_name__ ( A : int, A : int ): '''simple docstring''' if b < 0: return 1 / actual_power(A, A ) return actual_power(A, A ) if __name__ == "__main__": print(power(-2, -3))
662
def __magic_name__ ( A : int, A : int, A : int ): '''simple docstring''' if exponent == 1: return base % modulo_value if exponent % 2 == 0: a = _modexpt(A, exponent // 2, A ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(A, exponent - 1, A )) % modulo_value def __magic_name__ ( A : int = 1777, A : int = 1855, A : int = 8 ): '''simple docstring''' a = base for _ in range(1, A ): a = _modexpt(A, A, 10**digits ) return result if __name__ == "__main__": print(F'''{solution() = }''')
662
1
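Both exponentiation entries above rely on recursive squaring; the modular variant should agree with Python's built-in three-argument pow. A quick cross-check with a rewritten helper (not the entry's own function):
def modexpt(base, exponent, modulo):
    # Square-and-multiply: O(log exponent) multiplications, all reduced mod modulo.
    if exponent == 0:
        return 1 % modulo
    half = modexpt(base, exponent // 2, modulo)
    result = (half * half) % modulo
    if exponent % 2:
        result = (result * base) % modulo
    return result

assert modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)
print(modexpt(1777, 1855, 10**8))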
from __future__ import annotations from typing import Any def __magic_name__ ( A : list[Any] ): '''simple docstring''' create_state_space_tree(A, [], 0 ) def __magic_name__ ( A : list[Any], A : list[Any], A : int ): '''simple docstring''' if index == len(A ): print(A ) return create_state_space_tree(A, A, index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(A, A, index + 1 ) current_subsequence.pop() if __name__ == "__main__": __lowerCAmelCase : list[Any] = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(['A', 'B', 'C']) generate_all_subsequences(seq)
662
def __magic_name__ ( A : str, A : str ): '''simple docstring''' def get_matched_characters(A : str, A : str ) -> str: a = [] a = min(len(_stra ), len(_stra ) ) // 2 for i, l in enumerate(_stra ): a = int(max(0, i - limit ) ) a = int(min(i + limit + 1, len(_stra ) ) ) if l in _stra[left:right]: matched.append(A ) a = F"""{_stra[0:_stra.index(A )]} {_stra[_stra.index(A ) + 1:]}""" return "".join(A ) # matching characters a = get_matched_characters(A, A ) a = get_matched_characters(A, A ) a = len(A ) # transposition a = ( len([(ca, ca) for ca, ca in zip(A, A ) if ca != ca] ) // 2 ) if not match_count: a = 0.0 else: a = ( 1 / 3 * ( match_count / len(A ) + match_count / len(A ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters a = 0 for ca, ca in zip(stra[:4], stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler('hello', 'world'))
662
1
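generate_all_subsequences above walks a binary include/exclude decision tree, so it prints exactly 2^n subsequences. A cross-check against itertools.combinations, which enumerates the same ordered subsets:
from itertools import combinations

def all_subsequences(seq):
    return [list(c) for r in range(len(seq) + 1) for c in combinations(seq, r)]

subs = all_subsequences([3, 1, 2, 4])
print(len(subs))  # 16 == 2**4, one per include/exclude choice vector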
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __lowerCAmelCase : int = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __lowerCAmelCase : Dict = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __lowerCAmelCase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __lowerCAmelCase : Optional[int] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) __lowerCAmelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __lowerCAmelCase : List[Any] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) __lowerCAmelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) __lowerCAmelCase : Optional[Any] = np.expand_dims(test_image, axis=0) __lowerCAmelCase : Optional[int] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __lowerCAmelCase : str = 'Normal' if result[0][0] == 1: __lowerCAmelCase : Tuple = 'Abnormality detected'
662
__lowerCAmelCase : List[Any] = {str(digit): digit**5 for digit in range(10)} def __magic_name__ ( A : int ): '''simple docstring''' return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def __magic_name__ ( ): '''simple docstring''' return sum( number for number in range(1000, 1000000 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
662
1
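For the digit-fifth-powers entry above, the 1_000_000 search bound is safe because even seven 9s sum to only 7 * 9**5 = 413343, a six-digit number, so no seven-digit number can equal its own digit-power sum. A worked check for one known term:
# 4150 is one of the sought numbers: it equals the sum of the fifth powers of its digits.
print(4**5 + 1**5 + 5**5 + 0**5)  # 1024 + 1 + 3125 + 0 = 4150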
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = ["""vqvae"""] def __init__( self : Optional[int] , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Mel , __lowerCamelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> List[Any]: super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> int: return 50 if isinstance(self.scheduler , __lowerCamelCase ) else 10_00 @torch.no_grad() def __call__( self : int , __lowerCamelCase : int = 1 , __lowerCamelCase : str = None , __lowerCamelCase : np.ndarray = None , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = None , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Generator = None , __lowerCamelCase : float = 0 , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : torch.Tensor = None , __lowerCamelCase : List[Any]=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: a = steps or self.get_default_steps() self.scheduler.set_timesteps(__lowerCamelCase ) a = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: a = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: a = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowerCamelCase , device=self.device , ) a = noise a = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowerCamelCase , __lowerCamelCase ) a = self.mel.audio_slice_to_image(__lowerCamelCase ) a = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) a = (input_image / 2_55) * 2 - 1 a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: a = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample( generator=__lowerCamelCase )[0] a = self.vqvae.config.scaling_factor * input_images if start_step > 0: a = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] ) a = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) a = int(mask_start_secs * pixels_per_second ) a = int(mask_end_secs * pixels_per_second ) a = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowerCamelCase ): a = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"] else: a = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] if isinstance(self.scheduler , __lowerCamelCase ): a = self.scheduler.step( model_output=__lowerCamelCase , 
timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] else: a = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] if mask is not None: if mask_start > 0: a = mask[:, step, :, :mask_start] if mask_end > 0: a = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance a = 1 / self.vqvae.config.scaling_factor * images a = self.vqvae.decode(__lowerCamelCase )["sample"] a = (images / 2 + 0.5).clamp(0 , 1 ) a = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() a = (images * 2_55).round().astype("uint8" ) a = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) ) a = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) ) @torch.no_grad() def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Image.Image] , __lowerCamelCase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase ) a = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) a = (sample / 2_55) * 2 - 1 a = torch.Tensor(__lowerCamelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): a = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps a = self.scheduler.alphas_cumprod[t] a = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) a = 1 - alpha_prod_t a = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] a = (1 - alpha_prod_t_prev) ** 0.5 * model_output a = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) a = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __UpperCAmelCase ( __lowerCamelCase : torch.Tensor , __lowerCamelCase : torch.Tensor , __lowerCamelCase : float ) -> torch.Tensor: a = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
662
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} a = parent a = batch_size a = num_channels a = min_resolution a = max_resolution a = do_resize a = size a = do_normalize a = image_mean a = image_std a = do_rescale a = rescale_factor a = do_pad def __UpperCAmelCase ( self : List[Any] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]: if not batched: a = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): a , a = image.size else: a , a = image.shape[1], image.shape[2] if w < h: a = int(self.size["shortest_edge"] * h / w ) a = self.size["shortest_edge"] elif w > h: a = self.size["shortest_edge"] a = int(self.size["shortest_edge"] * w / h ) else: a = self.size["shortest_edge"] a = self.size["shortest_edge"] else: a = [] for image in image_inputs: a , a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = DetaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ) -> int: pass def __UpperCAmelCase ( self : Any ) -> Any: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ) -> List[str]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: # prepare image and target a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" 
) as f: a = json.loads(f.read() ) a = {"image_id": 3_97_69, "annotations": target} # encode them a = DetaImageProcessor() a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) @slow def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: # prepare image, target and masks_path a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a = DetaImageProcessor(format="coco_panoptic" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify masks a = 82_28_73 
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = PegasusConfig SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : Dict = """gelu""" def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Any=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : Any=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : int=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=40 , __lowerCamelCase : str=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple=0 , ) -> List[str]: a = parent a = batch_size a = seq_length a = is_training a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = eos_token_id a = pad_token_id a = bos_token_id def __UpperCAmelCase ( self : Dict ) -> List[Any]: a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) a = tf.concat([input_ids, eos_tensor] , axis=1 ) a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) a = prepare_pegasus_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, inputs_dict def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Optional[Any]: a = TFPegasusModel(config=__lowerCamelCase ).get_decoder() a = inputs_dict["input_ids"] a = input_ids[:1, :] a = inputs_dict["attention_mask"][:1, :] a = inputs_dict["head_mask"] a = 1 # first forward pass a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) a , a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a = ids_tensor((self.batch_size, 3) , config.vocab_size ) a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and a = tf.concat([input_ids, next_tokens] , axis=-1 ) a = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) a = 
model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice a = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) a = output_from_no_past[:, -3:, random_slice_idx] a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 ) def __magic_name__ ( A : List[str], A : int, A : Union[str, Any], A : List[str]=None, A : str=None, A : Optional[Any]=None, A : List[str]=None, A : int=None, ): '''simple docstring''' if attention_mask is None: a = tf.cast(tf.math.not_equal(A, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : Optional[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : List[Any] = ( { """conversational""": TFPegasusForConditionalGeneration, """feature-extraction""": TFPegasusModel, """summarization""": TFPegasusForConditionalGeneration, """text2text-generation""": TFPegasusForConditionalGeneration, """translation""": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : int = False def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: a = TFPegasusModelTester(self ) a = ConfigTester(self , config_class=__lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : List[Any] ) -> Dict: a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class snake_case__ (unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. 
I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] SCREAMING_SNAKE_CASE_ : Tuple = [ """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to""" """ reduce the risk of wildfires.""", """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""", ] # differs slightly from pytorch, likely due to numerical differences in linear layers SCREAMING_SNAKE_CASE_ : List[str] = """google/pegasus-xsum""" @cached_property def __UpperCAmelCase ( self : Optional[Any] ) -> Any: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __UpperCAmelCase ( self : Any ) -> str: a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> Optional[int]: a = self.translate_src_text(**__lowerCamelCase ) assert self.expected_text == generated_words def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Tuple ) -> Dict: a = self.tokenizer(self.src_text , **__lowerCamelCase , padding=__lowerCamelCase , return_tensors="tf" ) a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , ) a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase ) return generated_words @slow def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: self._assert_generated_batch_equal_expected()
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with cocktail shaker (bidirectional bubble) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break

    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
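# Illustrative traces (hypothetical inputs, mirroring the doctest convention):
#   cocktail_shaker_sort([4, 5, 2, 1, 2])      -> [1, 2, 2, 4, 5]
#   cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) -> [-4, 0, 1, 2, 5, 11]
# Each outer pass sweeps right (bubbling the max up) then left (bubbling the
# min down), and terminates early once a full sweep makes no swap.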
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Optional[Any] = { 'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'], 'tokenization_lxmert': ['LxmertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = ['LxmertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = [ 'LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder', 'LxmertXLayer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
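# The _LazyModule wiring above keeps `import transformers` cheap: the heavy
# submodules load only on first attribute access. Illustrative, assuming a
# torch install and network access to the checkpoint:
from transformers import LxmertConfig, LxmertModel, LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = LxmertModel(LxmertConfig())  # randomly initialized, config defaults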
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase ) }
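# A minimal sketch of the underlying NLTK call this metric wraps (the
# `datasets.load_metric("google_bleu")` route is shown in the docstring above):
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))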
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __lowerCAmelCase : List[str] = logging.get_logger(__name__) def __magic_name__ ( A : np.ndarray, A : Union[int, Iterable[int]], A : bool, A : int ): '''simple docstring''' def constraint_to_multiple_of(A : Any, A : Tuple, A : Optional[int]=0, A : int=None ): a = round(val / multiple ) * multiple if max_val is not None and x > max_val: a = math.floor(val / multiple ) * multiple if x < min_val: a = math.ceil(val / multiple ) * multiple return x a = (output_size, output_size) if isinstance(A, A ) else output_size a , a = get_image_size(A ) a , a = output_size # determine new height and width a = output_height / input_height a = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width a = scale_width else: # fit height a = scale_height a = constraint_to_multiple_of(scale_height * input_height, multiple=A ) a = constraint_to_multiple_of(scale_width * input_width, multiple=A ) return (new_height, new_width) class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = ["""pixel_values"""] def __init__( self : Optional[Any] , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_55 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : Any , ) -> None: super().__init__(**__lowerCamelCase ) a = size if size is not None else {"height": 3_84, "width": 3_84} a = get_size_dict(__lowerCamelCase ) a = do_resize a = size a = keep_aspect_ratio a = ensure_multiple_of a = resample a = do_rescale a = rescale_factor a = do_normalize a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCAmelCase ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Any , ) -> np.ndarray: a = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) a = get_resize_output_image_size( __lowerCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__lowerCamelCase , multiple=__lowerCamelCase , ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : int , ) -> Tuple: return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Union[str, Any] , ) -> np.ndarray: return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : Optional[int] , ) -> PIL.Image.Image: a = do_resize if do_resize is not None else self.do_resize a = size if size is not None else self.size a = get_size_dict(__lowerCamelCase ) a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of a = resample if resample is not None else self.resample a = do_rescale if do_rescale is not None else self.do_rescale a = rescale_factor if rescale_factor is not None else self.rescale_factor a = do_normalize if do_normalize is not None else self.do_normalize a = image_mean if image_mean is not None else self.image_mean a = image_std if image_std is not None else self.image_std a = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
a = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: a = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_rescale: a = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: a = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] a = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] a = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[Tuple] = None ) -> Any: a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowerCamelCase ): a = target_sizes.numpy() a = [] for idx in range(len(__lowerCamelCase ) ): a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase ) a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCamelCase ) else: a = logits.argmax(dim=1 ) a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
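# Illustrative re-statement of the multiple-of constraint applied during
# resizing above (standalone, because the module-level helper names are
# mangled in this dump; upstream transformers calls the outer helper
# `get_resize_output_image_size`):
import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

print(constraint_to_multiple_of(380, 32))  # 384: snapped up to the nearest multiple
print(constraint_to_multiple_of(366, 32))  # 352: snapped down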
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
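# Typical invocations (assuming the script sits at its upstream location,
# utils/sort_auto_mappings.py, inside a transformers checkout):
#   python utils/sort_auto_mappings.py                # rewrite files in place
#   python utils/sort_auto_mappings.py --check_only   # only report; raise if sorting is needed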
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR outputs 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
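# Truth table realized above:
#   input_1  input_2 | xnor_gate
#      0        0    |    1
#      0        1    |    0
#      1        0    |    0
#      1        1    |    1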
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = '▁' __lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'} __lowerCAmelCase : int = { 'vocab_file': { 'google/reformer-crime-and-punishment': ( 'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model' ) } } __lowerCAmelCase : Any = { 'google/reformer-crime-and-punishment': 52_4288, } class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None: a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def __UpperCAmelCase ( self : Optional[int] ) -> int: return self.sp_model.get_piece_size() def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]: a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Optional[Any]: a = self.__dict__.copy() a = None return state def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]: a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any: return self.sp_model.piece_to_id(__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str: if index < self.sp_model.get_piece_size(): a = self.sp_model.IdToPiece(__lowerCamelCase ) return token def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]: a = [] a = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token a = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and 
os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
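# A minimal round-trip sketch of the tokenizer defined above, assuming
# network access to the checkpoint:
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer("Crime and Punishment")["input_ids"]
print(tokenizer.decode(ids))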
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from __future__ import annotations import time import numpy as np __lowerCAmelCase : List[str] = [8, 5, 9, 7] __lowerCAmelCase : str = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __lowerCAmelCase : Optional[Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : """simple docstring""" def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None: a = claim_vector a = allocated_resources_table a = maximum_claim_table def __UpperCAmelCase ( self : List[str] ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self : str ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self : Dict ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]: return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None: a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack a = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def __UpperCAmelCase ( self : Any ) -> str: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
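# A tiny standalone illustration of the "need" computation at the heart of the
# Banker's algorithm above (data re-declared here because the module-level
# names are mangled in this dump): need = maximum_claim - currently_allocated.
import numpy as np

allocated = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])
print(maximum - allocated)  # resources each process may still request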
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class snake_case__ (TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
    """simple docstring"""

    def __init__( self : str , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : Union[str, Any] ) -> int:
        super().__init__(features=__lowerCamelCase )
        a = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str:
        import torch

        if isinstance(__lowerCamelCase , __lowerCamelCase ) and column:
            if all(
                isinstance(__lowerCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(__lowerCamelCase )
        return column

    def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ) -> List[Any]:
        import torch

        if isinstance(__lowerCamelCase , (str, bytes, type(__lowerCamelCase )) ):
            return value
        elif isinstance(__lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        a = {}
        if isinstance(__lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            a = {"dtype": torch.intaa}
        elif isinstance(__lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            a = {"dtype": torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(__lowerCamelCase , PIL.Image.Image ):
                a = np.asarray(__lowerCamelCase )

        return torch.tensor(__lowerCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} )

    def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] ) -> List[Any]:
        import torch

        # support for torch, tf, jax etc.
        if hasattr(__lowerCamelCase , "__array__" ) and not isinstance(__lowerCamelCase , torch.Tensor ):
            a = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(__lowerCamelCase , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(__lowerCamelCase ) for substruct in data_struct] )
        elif isinstance(__lowerCamelCase , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(__lowerCamelCase ) for substruct in data_struct] )
        return self._tensorize(__lowerCamelCase )

    def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : dict ) -> Union[str, Any]:
        return map_nested(self._recursive_tensorize , __lowerCamelCase , map_list=__lowerCamelCase )

    def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : pa.Table ) -> Mapping:
        a = self.numpy_arrow_extractor().extract_row(__lowerCamelCase )
        a = self.python_features_decoder.decode_row(__lowerCamelCase )
        return self.recursive_tensorize(__lowerCamelCase )

    def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : pa.Table ) -> "torch.Tensor":
        a = self.numpy_arrow_extractor().extract_column(__lowerCamelCase )
        a = self.python_features_decoder.decode_column(__lowerCamelCase , pa_table.column_names[0] )
        a = self.recursive_tensorize(__lowerCamelCase )
        a = self._consolidate(__lowerCamelCase )
        return column

    def __UpperCAmelCase ( self : Dict , __lowerCamelCase : pa.Table ) -> Mapping:
        a = self.numpy_arrow_extractor().extract_batch(__lowerCamelCase )
        a = self.python_features_decoder.decode_batch(__lowerCamelCase )
        a = self.recursive_tensorize(__lowerCamelCase )
        for column_name in batch:
            a = self._consolidate(batch[column_name] )
        return batch
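# In practice this formatter is selected through the public `datasets` API
# rather than constructed directly; a minimal sketch, assuming torch:
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
print(type(ds[0]["x"]))  # <class 'torch.Tensor'>
print(ds[0]["x"].dtype)  # floats map to the formatter's default float dtype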
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowerCAmelCase : List[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset) def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) else: return _interleave_iterable_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ): '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A, info=A, split=A, axis=A ) else: return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
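# A minimal sketch of the two entry points implemented above (exported
# upstream as `interleave_datasets` and `concatenate_datasets`; the function
# names are mangled in this dump):
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

print(concatenate_datasets([d1, d2])["a"])  # [0, 1, 2, 10, 11, 12]
print(interleave_datasets([d1, d2])["a"])   # alternating rows: [0, 10, 1, 11, 2, 12]
print(interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)["a"])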
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
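# Illustrative calls (hypothetical inputs):
#   dna("GCTA") -> 'CGAT'
#   dna("ATGC") -> 'TACG'
#   dna("ATXC")    raises ValueError("Invalid Strand"), since "X" is not a base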
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __lowerCAmelCase : Any = '▁' class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : List[int] = [] def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
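# A minimal sketch of the fast tokenizer defined above, assuming network
# access to the checkpoint:
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Paris is the [MASK] of France.")
print(encoded["input_ids"])  # includes the [CLS]/[SEP] ids added by the template
print(tokenizer.decode(encoded["input_ids"]))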
662
1
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ) -> List[Any]: a = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } a = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 1_60_00, "return_attention_mask": False, "do_normalize": True, } a = tempfile.mkdtemp() a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) # load decoder from hub a = "hf-internal-testing/ngram-beam-search-decoder" def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Union[str, Any] ) -> int: a = self.add_kwargs_tokens_map.copy() kwargs.update(__lowerCamelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> int: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple , **__lowerCamelCase : Union[str, Any] ) -> int: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = self.get_feature_extractor() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __lowerCamelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , 
decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: a = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match a = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __UpperCAmelCase ( self : Any ) -> Dict: a = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(__lowerCamelCase , "include" ): WavaVecaProcessorWithLM( tokenizer=__lowerCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __UpperCAmelCase ( self : Any ) -> Dict: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = floats_list((3, 10_00) ) a = feature_extractor(__lowerCamelCase , return_tensors="np" ) a = processor(__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCAmelCase ( self : List[str] ) -> List[str]: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = "This is a test string" a = processor(text=__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any]=(2, 10, 16) , __lowerCamelCase : List[str]=77 ) -> List[str]: np.random.seed(__lowerCamelCase ) return np.random.rand(*__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = self._get_dummy_logits(shape=(10, 16) , seed=13 ) a = processor.decode(__lowerCamelCase ) a = decoder.decode_beams(__lowerCamelCase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("</s> <s> </s>" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Dict: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. 
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: a = processor.batch_decode(__lowerCamelCase ) else: with get_context(__lowerCamelCase ).Pool() as pool: a = processor.batch_decode(__lowerCamelCase , __lowerCamelCase ) a = list(__lowerCamelCase ) with get_context("fork" ).Pool() as p: a = decoder.decode_beams_batch(__lowerCamelCase , __lowerCamelCase ) a , a , a = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__lowerCamelCase , decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text ) self.assertListEqual(__lowerCamelCase , decoded_processor.logit_score ) self.assertListEqual(__lowerCamelCase , decoded_processor.lm_score ) def __UpperCAmelCase ( self : List[str] ) -> str: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = self._get_dummy_logits() a = 15 a = -20.0 a = -4.0 a = processor.batch_decode( __lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , ) a = decoded_processor_out.text a = list(__lowerCamelCase ) with get_context("fork" ).Pool() as pool: a = decoder.decode_beams_batch( __lowerCamelCase , __lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , ) a = [d[0][0] for d in decoded_decoder_out] a = [d[0][2] for d in decoded_decoder_out] a = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __lowerCamelCase ) self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , __lowerCamelCase , atol=1e-3 ) ) self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , __lowerCamelCase , atol=1e-3 ) ) def __UpperCAmelCase ( self : List[Any] ) -> int: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) a = self._get_dummy_logits() a = 2.0 a = 5.0 a = -20.0 a = True a = processor.batch_decode( __lowerCamelCase , alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , ) a = decoded_processor_out.text a = list(__lowerCamelCase ) decoder.reset_params( alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , ) with get_context("fork" ).Pool() as pool: a = decoder.decode_beams_batch( __lowerCamelCase , __lowerCamelCase , ) a = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __lowerCamelCase ) a = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> int: a = 
WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) a = processor.decoder.model_container[processor.decoder._model_key] a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() a = os.listdir(__lowerCamelCase ) a = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> Optional[Any]: a = snapshot_download("hf-internal-testing/processor_with_lm" ) a = WavaVecaProcessorWithLM.from_pretrained(__lowerCamelCase ) a = processor.decoder.model_container[processor.decoder._model_key] a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() a = os.listdir(__lowerCamelCase ) a = os.listdir(__lowerCamelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ) -> List[str]: a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) a = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) a = floats_list((3, 10_00) ) a = processor_wavaveca(__lowerCamelCase , return_tensors="np" ) a = processor_auto(__lowerCamelCase , return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) a = self._get_dummy_logits() a = processor_wavaveca.batch_decode(__lowerCamelCase ) a = processor_auto.batch_decode(__lowerCamelCase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __UpperCAmelCase ( self : str ) -> int: a = self.get_feature_extractor() a = self.get_tokenizer() a = self.get_decoder() a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) @staticmethod def __UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int ) -> Union[str, Any]: a = [d[key] for d in offsets] return retrieved_list def __UpperCAmelCase ( self : Optional[int] ) -> int: a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) a = self._get_dummy_logits()[0] a = processor.decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) a = self._get_dummy_logits() a = 
processor.batch_decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) ) self.assertListEqual( [" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: import torch a = load_dataset("common_voice" , "en" , split="train" , streaming=__lowerCamelCase ) a = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_60_00 ) ) a = iter(__lowerCamelCase ) a = next(__lowerCamelCase ) a = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) a = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train a = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values with torch.no_grad(): a = model(__lowerCamelCase ).logits.cpu().numpy() a = processor.decode(logits[0] , output_word_offsets=__lowerCamelCase ) a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate a = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] a = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , __lowerCamelCase ) self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , output.text ) # output times a = torch.tensor(self.get_from_offsets(__lowerCamelCase , "start_time" ) ) a = torch.tensor(self.get_from_offsets(__lowerCamelCase , "end_time" ) ) # fmt: off a = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) a = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.01 ) ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.01 ) )
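# Hedged sketch (not taken from the test file above): the slow test converts CTC
# word offsets to seconds via inputs_to_logits_ratio / sampling_rate. The value
# 320 is the usual Wav2Vec2 samples-per-logit-frame ratio; treat it as an
# assumption here, not a guarantee for every checkpoint.

def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    time_per_frame = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_per_frame,
            "end_time": d["end_offset"] * time_per_frame,
        }
        for d in word_offsets
    ]

print(offsets_to_times([{"word": "hello", "start_offset": 0, "end_offset": 25}]))
# -> [{'word': 'hello', 'start_time': 0.0, 'end_time': 0.5}]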
662
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) a = parser.parse_args() return args def __magic_name__ ( A : List[str] ): '''simple docstring''' def fn(A : Tuple ): return tokenizer(examples["text"] ) return fn def __magic_name__ ( A : Any ): '''simple docstring''' a = [] for i in range(len(tokenized_data["input_ids"] ) ): a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } a = tf.train.Features(feature=A ) a = tf.train.Example(features=A ) a = example.SerializeToString() records.append(A ) return records def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: a = min(len(A ), args.limit ) a = dataset.select(range(A ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a = os.path.join(args.output_dir, args.split ) if not os.path.exists(A ): os.makedirs(A ) else: a = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. a = tokenize_function(A ) a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. 
The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : List[Any] ): # Concatenate all texts. a = {k: sum(examples[k], [] ) for k in examples.keys()} a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a = { k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )] for k, t in concatenated_examples.items() } return result a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 ) a = 0 a = 0 for shard in range(0, len(A ), args.shard_size ): a = grouped_dataset[shard : shard + args.shard_size] a = len(dataset_snapshot["input_ids"] ) a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) a = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): a = serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A, A ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""", "w" ) as f: print(F"""Total {args.split} records: {total_records}""", file=A ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = parse_args() main(args)
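# Self-contained illustration of the group_texts logic used above: concatenate
# the tokenized batch, drop the remainder that does not fill a full block, and
# slice fixed-length chunks. Toy data only; max_length=4 stands in for
# args.max_length.

def group_texts(examples, max_length=4):
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = len(concatenated[next(iter(examples))])
    total_length = (total_length // max_length) * max_length  # drop the remainder
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }

batch = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8, 9, 10]]}
print(group_texts(batch))
# -> {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}  (9 and 10 are dropped)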
662
1
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow __lowerCAmelCase : List[Any] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) __lowerCAmelCase : int = logging.getLogger() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser() parser.add_argument("-f" ) a = parser.parse_args() return args.f def __magic_name__ ( A : Union[str, Any], A : Union[str, Any]="eval" ): '''simple docstring''' a = os.path.join(A, F"""{split}_results.json""" ) if os.path.exists(A ): with open(A, "r" ) as f: return json.load(A ) raise ValueError(F"""can't find {path}""" ) __lowerCAmelCase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ (_UpperCamelCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ) -> str: a = self.get_auto_remove_tmp_dir() a = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_flax_glue.main() a = get_results(__lowerCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow def __UpperCAmelCase ( self : List[Any] ) -> Dict: a = self.get_auto_remove_tmp_dir() a = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_clm_flax.main() a = get_results(__lowerCamelCase ) self.assertLess(result["eval_perplexity"] , 1_00 ) @slow def __UpperCAmelCase ( self : Optional[int] ) -> Dict: a = self.get_auto_remove_tmp_dir() a = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_summarization_flax.main() a = get_results(__lowerCamelCase , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 10 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def __UpperCAmelCase ( self : Any ) -> Tuple: a = self.get_auto_remove_tmp_dir() a = f""" run_mlm.py --model_name_or_path 
distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_mlm_flax.main() a = get_results(__lowerCamelCase ) self.assertLess(result["eval_perplexity"] , 42 ) @slow def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: a = self.get_auto_remove_tmp_dir() a = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_ta_mlm_flax.main() a = get_results(__lowerCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def __UpperCAmelCase ( self : str ) -> List[Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu a = 7 if get_gpu_count() > 1 else 2 a = self.get_auto_remove_tmp_dir() a = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_flax_ner.main() a = get_results(__lowerCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> str: a = self.get_auto_remove_tmp_dir() a = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_qa.main() a = get_results(__lowerCamelCase ) self.assertGreaterEqual(result["eval_f1"] , 30 ) self.assertGreaterEqual(result["eval_exact"] , 30 )
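# Standalone, deobfuscated version of the get_results helper these tests call:
# each example script writes "{split}_results.json" into its output directory
# and the assertions read the metrics back. Toy round-trip for clarity:

import json
import os
import tempfile

def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "eval_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)
    assert get_results(tmp)["eval_accuracy"] >= 0.75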
662
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( A : List[str] ): '''simple docstring''' a = {} a = tokenizer(example["content"], truncation=A )["input_ids"] a = len(example["content"] ) / len(output["input_ids"] ) return output __lowerCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) __lowerCAmelCase : str = parser.parse_args() if args.num_workers is None: __lowerCAmelCase : List[Any] = multiprocessing.cpu_count() __lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir) __lowerCAmelCase : List[Any] = time.time() __lowerCAmelCase : str = load_dataset(args.dataset_name, split='train') print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') __lowerCAmelCase : int = time.time() __lowerCAmelCase : Optional[int] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') __lowerCAmelCase : Tuple = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
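# Hedged toy version of the tokenize() map function above: it records each
# sample's character-to-token ratio. A whitespace split stands in for the real
# tokenizer, purely for illustration.

def tokenize(example):
    token_ids = example["content"].split()  # stand-in for tokenizer(...)["input_ids"]
    return {
        "input_ids": token_ids,
        "ratio_char_token": len(example["content"]) / len(token_ids),
    }

print(tokenize({"content": "def add(a, b): return a + b"}))
# a higher ratio means each token covers more characters of source text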
662
1
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy __lowerCAmelCase : List[str] = logging.get_logger(__name__) class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , **__lowerCamelCase : Any ) -> int: a = feature_size a = sampling_rate a = padding_value a = kwargs.pop("padding_side" , "right" ) a = kwargs.pop("return_attention_mask" , __lowerCamelCase ) super().__init__(**__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): a = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" f""" to this method that includes {self.model_input_names[0]}, but you provided""" f""" {list(processed_features.keys() )}""" ) a = processed_features[self.model_input_names[0]] a = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__lowerCamelCase ) == 0: if return_attention_mask: a = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch a = required_input[0] if isinstance(__lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. a = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__lowerCamelCase ): a = required_input[index][0] if return_tensors is None: if is_tf_tensor(__lowerCamelCase ): a = "tf" elif is_torch_tensor(__lowerCamelCase ): a = "pt" elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ): a = "np" else: raise ValueError( f"""type of {first_element} unknown: {type(__lowerCamelCase )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): a = to_numpy(__lowerCamelCase ) else: a = [to_numpy(__lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy a = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase ) a = processed_features[self.model_input_names[0]] a = len(__lowerCamelCase ) if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) a = [] for i in range(__lowerCamelCase ): a = {k: v[i] for k, v in processed_features.items()} # truncation a = self._truncate( __lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , ) truncated_inputs.append(__lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) a = PaddingStrategy.MAX_LENGTH a = {} for i in range(__lowerCamelCase ): # padding a = self._pad( truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: a = [] if value.dtype is np.dtype(np.floataa ): a = value.astype(np.floataa ) batch_outputs[key].append(__lowerCamelCase ) return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> dict: a = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: a = len(__lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: a = np.ones(len(__lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: a = max_length - len(__lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: a = np.pad( processed_features["attention_mask"] , (0, difference) ) a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) a = np.pad( __lowerCamelCase , __lowerCamelCase , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: a = np.pad( processed_features["attention_mask"] , (difference, 0) ) a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) a = np.pad( __lowerCamelCase , __lowerCamelCase , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ) -> List[str]: if not truncation: return processed_features elif truncation and max_length is None: raise 
ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) a = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of a = len(__lowerCamelCase ) > max_length if needs_to_be_truncated: a = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: a = processed_features["attention_mask"][:max_length] return processed_features def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str]=False , __lowerCamelCase : int=None ) -> int: # Get padding strategy if padding is not False: if padding is True: a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__lowerCamelCase , __lowerCamelCase ): a = PaddingStrategy(__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): a = padding else: a = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
662
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __lowerCAmelCase : Union[str, Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=1 ) -> Union[str, Any]: a = tokenizer a = dataset a = len(__lowerCamelCase ) if n_tasks is None else n_tasks a = n_copies def __iter__( self : Tuple ) -> str: a = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]: a = start_length a = eof_strings a = tokenizer def __call__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]: a = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__lowerCamelCase ) def __magic_name__ ( A : List[Any] ): '''simple docstring''' a = re.split("(%s)" % "|".join(A ), A ) # last string should be "" return "".join(string_list[:-2] ) def __magic_name__ ( A : Union[str, Any], A : Optional[Any], A : List[Any], A : Optional[Any], A : List[str], A : List[Any]=20, **A : Union[str, Any] ): '''simple docstring''' a = defaultdict(A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(A ) ): with torch.no_grad(): a = batch["ids"].shape[-1] a = accelerator.unwrap_model(A ).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=A, **A ) # each task is generated batch_size times a = batch["task_id"].repeat(A ) a = accelerator.pad_across_processes( A, dim=1, pad_index=tokenizer.pad_token_id ) a , a = accelerator.gather((generated_tokens, generated_tasks) ) a = generated_tokens.cpu().numpy() a = generated_tasks.cpu().numpy() for task, generated_tokens in zip(A, A ): gen_token_dict[task].append(A ) a = [[] for _ in range(A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a = tokenizer.decode(A, skip_special_tokens=A, clean_up_tokenization_spaces=A ) code_gens[task].append(remove_last_block(A ) ) return code_gens def __magic_name__ ( ): '''simple docstring''' a = HfArgumentParser(A ) a = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing a = "false" if 
args.num_workers is None: a = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a = Accelerator() set_seed(args.seed, device_specific=A ) # Load model and tokenizer a = AutoTokenizer.from_pretrained(args.model_ckpt ) a = tokenizer.eos_token a = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, A, A )] ), } # Load evaluation dataset and metric a = load_dataset("openai_humaneval" ) a = load_metric("code_eval" ) a = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) a = args.n_samples // args.batch_size a = TokenizedDataset(A, human_eval["test"], n_copies=A, n_tasks=A ) # do not confuse args.batch_size, which is actually the num_return_sequences a = DataLoader(A, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a = code_eval_metric.compute(references=[""], predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception a , a = accelerator.prepare(A, A ) a = complete_code( A, A, A, A, n_tasks=A, batch_size=args.batch_size, **A, ) if accelerator.is_main_process: a = [] for task in tqdm(range(A ) ): a = human_eval["test"][task]["test"] a = F"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric a , a = code_eval_metric.compute( references=A, predictions=A, num_workers=args.num_workers ) print(F"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file, "w" ) as fp: json.dump(A, A ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
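# Standalone demo of remove_last_block above: split the generation on the EOF
# markers and drop the trailing, usually unfinished, block. EOF_STRINGS matches
# the module-level list defined at the top of this script.

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # the last two elements are the final separator and the unfinished tail
    return "".join(string_list[:-2])

completion = "    return a + b\ndef unfinished(x):\n    retur"
print(repr(remove_last_block(completion)))  # -> '    return a + b'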
662
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : str=7 , __lowerCamelCase : int=3 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Any=4_00 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=[0.5, 0.5, 0.5] , __lowerCamelCase : int=[0.5, 0.5, 0.5] , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]=1 / 2_55 , __lowerCamelCase : Optional[Any]=True , ) -> Optional[Any]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} a = parent a = batch_size a = num_channels a = min_resolution a = max_resolution a = do_resize a = size a = do_normalize a = image_mean a = image_std a = do_rescale a = rescale_factor a = do_pad def __UpperCAmelCase ( self : List[str] ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=False ) -> Any: if not batched: a = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): a , a = image.size else: a , a = image.shape[1], image.shape[2] if w < h: a = int(self.size["shortest_edge"] * h / w ) a = self.size["shortest_edge"] elif w > h: a = self.size["shortest_edge"] a = int(self.size["shortest_edge"] * w / h ) else: a = self.size["shortest_edge"] a = self.size["shortest_edge"] else: a = [] for image in image_inputs: a , a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = ConditionalDetrImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : str ) -> str: a = ConditionalDetrImageProcessingTester(self ) @property def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Any ) -> Optional[Any]: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def __UpperCAmelCase ( self : int ) -> List[str]: a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 
13_33} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: pass def __UpperCAmelCase ( self : str ) -> Union[str, Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : str ) -> Union[str, Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ) -> Dict: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: # prepare image and 
target a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"image_id": 3_97_69, "annotations": target} # encode them a = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) @slow def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: # prepare image, target and masks_path a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a = ConditionalDetrImageProcessor(format="coco_panoptic" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) 
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify masks a = 82_28_73 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
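# Hedged pure-Python version of the expected-size computation in the tester
# above: scale so the shorter side hits shortest_edge while keeping the aspect
# ratio (the longest_edge cap is deliberately ignored, as in the tester's
# simple branch).

def expected_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_size(400, 300))  # -> (24, 18)
print(expected_size(300, 400))  # -> (18, 24)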
662
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : Any = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
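# Hedged sketch of the optional-dependency pattern this __init__ relies on:
# probe availability, raise OptionalDependencyNotAvailable to skip the guarded
# import block, and swallow it so the package still imports without the extra
# dependency. All names below are toy stand-ins, not the transformers internals.

class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available():  # stand-in for the real availability probe
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("torch objects not exported")
else:
    print("torch objects would be imported here")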
662
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : Any = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast SCREAMING_SNAKE_CASE_ : str = True def __UpperCAmelCase ( self : Optional[int] ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]: a = "lower newer" a = "lower newer" return input_text, output_text def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: a = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __UpperCAmelCase ( self : Any ) -> str: a = self.get_tokenizer() a = "Encode this sequence." a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) # Testing spaces after special tokens a = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space a = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) a = "Encode <mask> sequence" a = "Encode <mask>sequence" a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ) a = encoded.index(__lowerCamelCase ) a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : str ) -> List[str]: pass def __UpperCAmelCase ( self : int ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) a = "A, <mask> AllenNLP sentence." 
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] ) -> Dict: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a = f"""{text_of_1_token} {text_of_1_token}""" a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = 
tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) a = self.rust_tokenizer_class.from_pretrained( __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase ) a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
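# Added note (sketch, not part of the original test): with the toy vocab and
# merges defined in setUp(), the byte-level BPE maps a leading space to the
# \u0120 ("Ġ") marker, so " lower" would tokenize as ["\u0120low", "er"],
# while "lower" without a prefix space falls back to ["l", "o", "w", "er"],
# matching the expectation asserted in the full-tokenizer test above.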
662
1
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
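# Worked example (added sketch): for array = [1, 2, 5] and target = 5 the
# bottom-up table evolves as dp_array = [1, 1, 2, 3, 5, 9], since each
# dp_array[i] sums dp_array[i - 1], dp_array[i - 2] and dp_array[i - 5]
# whenever those indices exist. All three implementations return 9, the
# number of ordered combinations summing to 5.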
662
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {'tokenization_tapex': ['TapexTokenizer']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
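# Usage sketch (added; the checkpoint name is illustrative): because the
# module object is replaced by a _LazyModule, `tokenization_tapex` is only
# imported the first time the attribute is resolved, e.g.:
#
#   from transformers import TapexTokenizer  # triggers the lazy import
#   tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")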
662
1
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase : str = logging.get_logger(__name__) __lowerCAmelCase : int = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __lowerCAmelCase : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __lowerCAmelCase : str = {'facebook/blenderbot-3B': 128} class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : Dict = BlenderbotTokenizer def __init__( self : Any , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]="replace" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Dict=False , __lowerCamelCase : int=True , **__lowerCamelCase : Optional[int] , ) -> List[Any]: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: a = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) a = add_prefix_space a = pre_tok_class(**__lowerCamelCase ) a = add_prefix_space a = "post_processor" a = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a = tuple(state["sep"] ) if "cls" in state: a = tuple(state["cls"] ) a = False if state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: a = add_prefix_space a = True if state.get("trim_offsets" , __lowerCamelCase ) != trim_offsets: a = trim_offsets a = True if changes_to_apply: a = getattr(__lowerCamelCase , state.pop("type" ) ) a = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCAmelCase ( self : Optional[int] ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any] ) -> Union[str, Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value a = value def __UpperCAmelCase ( self : Any , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : List[str] ) -> BatchEncoding: a = kwargs.get("is_split_into_words" , __lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Tuple ) -> BatchEncoding: a = kwargs.get("is_split_into_words" , __lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: a = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> str: return token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : "Conversation" ) -> List[int]: a = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) a = " ".join(__lowerCamelCase ) a = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: a = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
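# Added note (sketch): unlike RoBERTa, build_inputs_with_special_tokens above
# only appends eos_token_id, so a single encoded Blenderbot sequence ends with
# </s> and is not wrapped with a leading <s>.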
662
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    'configuration_blip': [
        'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlipConfig',
        'BlipTextConfig',
        'BlipVisionConfig',
    ],
    'processing_blip': ['BlipProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip'] = [
        'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlipModel',
        'BlipPreTrainedModel',
        'BlipForConditionalGeneration',
        'BlipForQuestionAnswering',
        'BlipVisionModel',
        'BlipTextModel',
        'BlipForImageTextRetrieval',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_blip'] = [
        'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFBlipModel',
        'TFBlipPreTrainedModel',
        'TFBlipForConditionalGeneration',
        'TFBlipForQuestionAnswering',
        'TFBlipVisionModel',
        'TFBlipTextModel',
        'TFBlipForImageTextRetrieval',
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
662
1
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ) -> Union[str, Any]: return f"""gaussian_noise_s={seed}_shape={"_".join([str(__lowerCamelCase ) for s in shape] )}.npy""" def __UpperCAmelCase ( self : Any ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Dict=(4, 4, 64, 64) , __lowerCamelCase : Union[str, Any]=False ) -> Union[str, Any]: a = jnp.bfloataa if fpaa else jnp.floataa a = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase ) return image def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any="CompVis/stable-diffusion-v1-4" ) -> List[str]: a = jnp.bfloataa if fpaa else jnp.floataa a = "bf16" if fpaa else None a , a = FlaxUNetaDConditionModel.from_pretrained( __lowerCamelCase , subfolder="unet" , dtype=__lowerCamelCase , revision=__lowerCamelCase ) return model, params def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int=0 , __lowerCamelCase : Dict=(4, 77, 7_68) , __lowerCamelCase : Dict=False ) -> Union[str, Any]: a = jnp.bfloataa if fpaa else jnp.floataa a = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase , __lowerCamelCase ) ) , dtype=__lowerCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ) -> List[Any]: a , a = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=__lowerCamelCase ) a = self.get_latents(__lowerCamelCase , fpaa=__lowerCamelCase ) a = self.get_encoder_hidden_states(__lowerCamelCase , fpaa=__lowerCamelCase ) a = model.apply( {"params": params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample assert sample.shape == latents.shape a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) a = jnp.array(__lowerCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Any , 
__lowerCamelCase : List[str] , __lowerCamelCase : str ) -> List[Any]: a , a = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=__lowerCamelCase ) a = self.get_latents(__lowerCamelCase , shape=(4, 4, 96, 96) , fpaa=__lowerCamelCase ) a = self.get_encoder_hidden_states(__lowerCamelCase , shape=(4, 77, 10_24) , fpaa=__lowerCamelCase ) a = model.apply( {"params": params} , __lowerCamelCase , jnp.array(__lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCamelCase , ).sample assert sample.shape == latents.shape a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) a = jnp.array(__lowerCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-2 )
662
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """simple docstring"""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """simple docstring"""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
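if __name__ == "__main__":
    # Minimal self-check (added sketch, not part of the original module): two
    # timesteps embedded into 8 dimensions, half sine and half cosine features.
    example_timesteps = jnp.array([0.0, 10.0])
    example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=8)
    assert example_emb.shape == (2, 8)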
662
1
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
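    # Round-trip self-check (added sketch, not part of the original file):
    # 12 input bytes encode to 16 Base64 characters with no padding.
    assert base64_encode(b"Hello World!") == b"SGVsbG8gV29ybGQh"
    assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"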
662
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> Dict: a = tempfile.mkdtemp() a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) a = { "do_resize": True, "size": {"height": 2_24, "width": 2_24}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], "do_convert_rgb": True, } a = os.path.join(self.tmpdirname , __lowerCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]: return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str: return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : int ) -> List[str]: a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase ) a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , 
__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) a = self.get_image_processor(do_normalize=__lowerCamelCase ) a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__lowerCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = self.prepare_image_inputs() a = image_processor(__lowerCamelCase , return_tensors="np" ) a = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCAmelCase ( self : str ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = processor(text=__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self : List[Any] ) -> Any: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(__lowerCamelCase ) a = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> List[str]: a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
662
1
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=A, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=A, type=A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) a = parser.parse_args() return args def __magic_name__ ( A : List[str] ): '''simple docstring''' def fn(A : Tuple ): return tokenizer(examples["text"] ) return fn def __magic_name__ ( A : Any ): '''simple docstring''' a = [] for i in range(len(tokenized_data["input_ids"] ) ): a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } a = tf.train.Features(feature=A ) a = tf.train.Example(features=A ) a = example.SerializeToString() records.append(A ) return records def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: a = min(len(A ), args.limit ) a = dataset.select(range(A ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a = os.path.join(args.output_dir, args.split ) if not os.path.exists(A ): os.makedirs(A ) else: a = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. a = tokenize_function(A ) a = dataset.map(A, batched=A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. 
The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A : List[Any] ): # Concatenate all texts. a = {k: sum(examples[k], [] ) for k in examples.keys()} a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a = { k: [t[i : i + args.max_length] for i in range(0, A, args.max_length )] for k, t in concatenated_examples.items() } return result a = dataset_tokenized.map(A, batched=A, batch_size=1000, num_proc=4 ) a = 0 a = 0 for shard in range(0, len(A ), args.shard_size ): a = grouped_dataset[shard : shard + args.shard_size] a = len(dataset_snapshot["input_ids"] ) a = os.path.join(A, F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) a = get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): a = serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A, A ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""", "w" ) as f: print(F"""Total {args.split} records: {total_records}""", file=A ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = parse_args() main(args)
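# Example invocation (added sketch; the script filename is assumed, every flag
# comes from the argparse definitions above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 --split train \
#       --shard_size 1000 --max_length 512 --output_dir tf-tpu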
662
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __magic_name__ ( A : Union[str, Any] ): '''simple docstring''' a = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$", A ).groups()[0] class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None ) -> Tuple: a = file_names a = image_transform a = label_to_id def __len__( self : Any ) -> Tuple: return len(self.file_names ) def __getitem__( self : List[Any] , __lowerCamelCase : List[Any] ) -> int: a = self.file_names[idx] a = PIL.Image.open(__lowerCamelCase ) a = raw_image.convert("RGB" ) if self.image_transform is not None: a = self.image_transform(__lowerCamelCase ) a = extract_label(__lowerCamelCase ) if self.label_to_id is not None: a = self.label_to_id[label] return {"image": image, "label": label} def __magic_name__ ( A : str, A : int ): '''simple docstring''' if args.with_tracking: a = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config["lr"] a = int(config["num_epochs"] ) a = int(config["seed"] ) a = int(config["batch_size"] ) a = config["image_size"] if not isinstance(A, (list, tuple) ): a = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit" ): if args.checkpointing_steps == "epoch": a = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): a = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: a = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: a = os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A, A ) # Grab all the image filenames a = [os.path.join(args.data_dir, A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences a = [extract_label(A ) for fname in file_names] a = list(set(A ) ) id_to_label.sort() a = {lbl: i for i, lbl in enumerate(A )} # Set the seed before splitting the data. np.random.seed(A ) torch.manual_seed(A ) torch.cuda.manual_seed_all(A ) # Split our filenames between train and validation a = np.random.permutation(len(A ) ) a = int(0.8 * len(A ) ) a = random_perm[:cut] a = random_perm[cut:] # For training we use a simple RandomResizedCrop a = Compose([RandomResizedCrop(A, scale=(0.5, 1.0) ), ToTensor()] ) a = PetsDataset( [file_names[i] for i in train_split], image_transform=A, label_to_id=A ) # For evaluation, we use a deterministic Resize a = Compose([Resize(A ), ToTensor()] ) a = PetsDataset([file_names[i] for i in eval_split], image_transform=A, label_to_id=A ) # Instantiate dataloaders. 
a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) a = DataLoader(A, shuffle=A, batch_size=A, num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = create_model("resnet50d", pretrained=A, num_classes=len(A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): a = False for param in model.get_classifier().parameters(): a = True # We normalize the batches of images to be a bit faster. a = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) a = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer a = torch.optim.Adam(params=model.parameters(), lr=lr / 25 ) # Instantiate learning rate scheduler a = OneCycleLR(optimizer=A, max_lr=A, epochs=A, steps_per_epoch=len(A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( A, A, A, A, A ) # We need to keep track of how many total steps we have iterated over a = 0 # We also need to keep track of the starting epoch so files are named properly a = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) a = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint a = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) a = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` a = os.path.splitext(A )[0] if "epoch" in training_difference: a = int(training_difference.replace("epoch_", "" ) ) + 1 a = None else: a = int(training_difference.replace("step_", "" ) ) a = resume_step // len(A ) resume_step -= starting_epoch * len(A ) # Now we train the model for epoch in range(A, A ): model.train() if args.with_tracking: a = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step a = accelerator.skip_first_batches(A, A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader a = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std a = model(A ) a = torch.nn.functional.cross_entropy(A, batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A, A ): a = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) model.eval() a = 0 a = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. a = {k: v.to(accelerator.device ) for k, v in batch.items()} a = (batch["image"] - mean) / std with torch.no_grad(): a = model(A ) a = outputs.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch["label"]) ) a = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() a = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(A ), "epoch": epoch, }, step=A, ) if checkpointing_steps == "epoch": a = F"""epoch_{epoch}""" if args.output_dir is not None: a = os.path.join(args.output_dir, A ) accelerator.save_state(A ) if args.with_tracking: accelerator.end_training() def __magic_name__ ( ): '''simple docstring''' a = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir", required=A, help="The data folder on disk." ) parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision", type=A, default=A, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps", type=A, default=A, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=A, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=A, default=A, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=A, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) a = parser.parse_args() a = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(A, A ) if __name__ == "__main__": main()
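# Example invocation (added sketch; the script name is assumed, the flags come
# from the parser above):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --with_tracking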
662
1
def factorial(num: int) -> int:
    '''simple docstring'''
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    '''simple docstring'''
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    '''simple docstring'''
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
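# Worked check (added sketch): factorial(10) == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27. With the default
# argument, solution(100) returns the digit sum of 100!, which is 648.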
700
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()

joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
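# Example (added sketch): invoked as `python utils/get_modified_files.py src tests`,
# joined_dirs becomes "src|tests" and the regex matches "src/foo/bar.py" or
# "tests/test_x.py" but not "docs/readme.md" or "setup.py".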
662
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __lowerCAmelCase : List[str] = get_tests_dir('fixtures') class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: # A mock response for an HTTP head request to emulate server down a = mock.Mock() a = 5_00 a = {} a = HTTPError a = {} # Download this model to make sure it's in the cache. a = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head: a = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def __UpperCAmelCase ( self : List[Any] ) -> Dict: # This test is for deprecated behavior and can be removed in v5 a = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class snake_case__ (unittest.TestCase ): """simple docstring""" @classmethod def __UpperCAmelCase ( cls : List[str] ) -> List[Any]: a = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __UpperCAmelCase ( cls : Optional[int] ) -> Union[str, Any]: try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def __UpperCAmelCase ( self : Any ) -> Any: a = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: a = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , 
repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) a = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __UpperCAmelCase ( self : Dict ) -> List[str]: CustomFeatureExtractor.register_for_auto_class() a = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) a = AutoFeatureExtractor.from_pretrained( f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Computes (base ** exponent) % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Returns the last `digits` digits of the hyperexponentiation of `base`
    by `height`, i.e. base↑↑height (Project Euler problem 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
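# Quick cross-check of the helper above: Python's built-in three-argument pow
# performs the same modular exponentiation, so the two must agree.
for exp in (1, 2, 3, 10, 1855):
    assert _modexpt(1777, exp, 10**8) == pow(1777, exp, 10**8)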
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } __lowerCAmelCase : List[str] = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } __lowerCAmelCase : int = '</w>' __lowerCAmelCase : List[Any] = '@@ ' def __magic_name__ ( A : Tuple ): '''simple docstring''' a = set() a = word[0] for char in word[1:]: pairs.add((prev_char, char) ) a = char return pairs # Speech2Text2 has no max input length __lowerCAmelCase : Dict = {'facebook/s2t-wav2vec2-large-en-de': 1024} class snake_case__ (snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Any=None , **__lowerCamelCase : List[str] , ) -> List[str]: super().__init__( unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , **UpperCAmelCase_ , ) a = do_lower_case with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle: a = json.load(UpperCAmelCase_ ) a = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""" ) a = None a = None else: with open(UpperCAmelCase_ , encoding="utf-8" ) as merges_handle: a = merges_handle.read().split("\n" )[:-1] a = [tuple(merge.split()[:2] ) for merge in merges] a = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) a = {} @property def __UpperCAmelCase ( self : Dict ) -> int: return len(self.decoder ) def __UpperCAmelCase ( self : int ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> List[Any]: a = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] a = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: a = min(UpperCAmelCase_ , key=lambda __lowerCamelCase : self.bpe_ranks.get(UpperCAmelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break a , a = bigram a = [] a = 0 while i < len(UpperCAmelCase_ ): try: a = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) a = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 a = tuple(UpperCAmelCase_ ) a = new_word if len(UpperCAmelCase_ ) == 1: break else: a = get_pairs(UpperCAmelCase_ ) a = " ".join(UpperCAmelCase_ ) if word == "\n " + BPE_TOKEN_MERGES: a = "\n" + BPE_TOKEN_MERGES if word.endswith(UpperCAmelCase_ ): a = word.replace(UpperCAmelCase_ , "" ) a = word.replace(" " , UpperCAmelCase_ ) a = word return word def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> List[str]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding." 
) if self.do_lower_case: a = text.lower() a = text.split() a = [] for token in text: if token: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(" " ) ) ) return split_tokens def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str ) -> int: return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : int ) -> str: a = self.decoder.get(UpperCAmelCase_ , self.unk_token ) return result def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> str: a = " ".join(UpperCAmelCase_ ) # make sure @@ tokens are concatenated a = "".join(string.split(UpperCAmelCase_ ) ) return string def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join( UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + "\n" ) a = 0 if self.bpe_ranks is None: return (vocab_file,) with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) a = token_index writer.write(" ".join(UpperCAmelCase_ ) + "\n" ) index += 1 return (vocab_file, merges_file)
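# Illustration of the decoding convention above: BPE sub-tokens carry a
# trailing "@@" marker, and convert_tokens_to_string stitches them back by
# deleting "@@ " from the space-joined token string. join_bpe_tokens is a
# hypothetical helper name; only the marker logic mirrors the tokenizer.
def join_bpe_tokens(tokens, marker="@@ "):
    text = " ".join(tokens)
    return "".join(text.split(marker))


assert join_bpe_tokens(["hel@@", "lo", "wor@@", "ld"]) == "hello world"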
def jaro_winkler(str1: str, str2: str) -> float:
    """Computes the Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be reused
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transpositions
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
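# Usage checks for the metric above: identical strings score 1.0, and the
# classic "martha"/"marhta" pair lands near 0.96 (printed rounded rather than
# compared exactly, since the value is a float).
assert jaro_winkler("hello", "hello") == 1.0
print(round(jaro_winkler("martha", "marhta"), 4))  # ~0.9611
print(round(jaro_winkler("hello", "world"), 4))    # low similarity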
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __magic_name__ : List[Any] = logging.get_logger(__name__) __magic_name__ : Optional[Any] = { "openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": "", } class snake_case__ (_SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "imagegpt" SCREAMING_SNAKE_CASE_ : int = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Tuple , __lowerCamelCase : Tuple=5_12 + 1 , __lowerCamelCase : Tuple=32 * 32 , __lowerCamelCase : Dict=5_12 , __lowerCamelCase : str=24 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any="quick_gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Tuple=1e-5 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : Tuple=False , **__lowerCamelCase : int , ) -> Dict: a = vocab_size a = n_positions a = n_embd a = n_layer a = n_head a = n_inner a = activation_function a = resid_pdrop a = embd_pdrop a = attn_pdrop a = layer_norm_epsilon a = initializer_range a = scale_attn_weights a = use_cache a = scale_attn_by_inverse_layer_idx a = reorder_and_upcast_attn a = tie_word_embeddings super().__init__(tie_word_embeddings=A_ , **A_ ) class snake_case__ (_SCREAMING_SNAKE_CASE ): """simple docstring""" @property def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[Any] = -1 , __lowerCamelCase : Dict = False , __lowerCamelCase : Dict = None , __lowerCamelCase : Dict = 3 , __lowerCamelCase : Optional[int] = 32 , __lowerCamelCase : Optional[Any] = 32 , ) -> Mapping[str, Any]: a = self._generate_dummy_images(A_ , A_ , A_ , A_ ) a = dict(preprocessor(images=A_ , return_tensors=A_ ) ) return inputs
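# Usage sketch: the configuration above mirrors transformers' ImageGPTConfig,
# so an equivalent object can be built from the library directly. Note how the
# attribute_map aliases route hidden_size to n_embd.
from transformers import ImageGPTConfig

config = ImageGPTConfig()
assert config.hidden_size == config.n_embd  # attribute_map alias
print(config.vocab_size)  # 513 by default: 512 color clusters plus a start-of-sequence token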
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Returns the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Returns the sum of all numbers that equal the sum of the fifth
    powers of their digits (Project Euler problem 30)."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
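# Worked example for the predicate above: 4150 qualifies, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150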
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class snake_case__ : """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Any=13 , __lowerCamelCase : str=7 , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict=5_12 , __lowerCamelCase : Optional[int]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=4 , __lowerCamelCase : Any=None , ) -> List[str]: a = parent a = batch_size a = seq_length a = is_training a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope a = self.vocab_size - 1 def __UpperCAmelCase ( self : Tuple ) -> int: a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , *__lowerCamelCase : str ) -> List[Any]: a = OpenAIGPTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() a = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) a = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) a = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , *__lowerCamelCase : Optional[Any] ) -> Dict: a = OpenAIGPTLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() 
a = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : str , *__lowerCamelCase : Union[str, Any] ) -> Dict: a = OpenAIGPTDoubleHeadsModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() a = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , *__lowerCamelCase : List[str] ) -> Any: a = self.num_labels a = OpenAIGPTForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: a = self.prepare_config_and_inputs() ( a ) = config_and_inputs a = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class snake_case__ (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Dict = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly SCREAMING_SNAKE_CASE_ : Tuple = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ) -> Union[str, Any]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=False ) -> List[Any]: a = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ , ) a = inputs_dict['labels'] a = inputs_dict['labels'] a = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase_ , ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def __UpperCAmelCase ( self : List[str] ) -> Any: a = OpenAIGPTModelTester(self ) a = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : int ) -> Union[str, Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self : Dict ) -> Any: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase_ ) @slow def __UpperCAmelCase ( self : str ) -> int: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = OpenAIGPTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : str ) -> List[Any]: a = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(UpperCamelCase_ ) a = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=UpperCamelCase_ ) # the president is a = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the a = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
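# Condensed sketch of the slow integration test above: greedy generation from
# the prompt "the president is" with the openai-gpt checkpoint. Running this
# downloads the model; the decode step is added here for readability and is
# not part of the original test.
import torch

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = torch.tensor([[481, 4735, 544]])  # token ids for "the president is"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(tokenizer.decode(output_ids[0]))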
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class snake_case__ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any=7 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=30 , __lowerCamelCase : int=4_00 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]=1 / 2_55 , __lowerCamelCase : Optional[int]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} a = parent a = batch_size a = num_channels a = min_resolution a = max_resolution a = do_resize a = size a = do_normalize a = image_mean a = image_std a = do_rescale a = rescale_factor a = do_pad def __UpperCAmelCase ( self : List[Any] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str=False ) -> List[str]: if not batched: a = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): a , a = image.size else: a , a = image.shape[1], image.shape[2] if w < h: a = int(self.size["shortest_edge"] * h / w ) a = self.size["shortest_edge"] elif w > h: a = self.size["shortest_edge"] a = int(self.size["shortest_edge"] * w / h ) else: a = self.size["shortest_edge"] a = self.size["shortest_edge"] else: a = [] for image in image_inputs: a , a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] a = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ (_UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = DetaImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: a = DetaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Optional[int] ) -> Tuple: a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ) -> int: pass def __UpperCAmelCase ( self : Any ) -> Any: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ) -> List[str]: # Initialize image_processing a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values a , a = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : Any ) -> List[Any]: # prepare image and target a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" 
) as f: a = json.loads(f.read() ) a = {"image_id": 3_97_69, "annotations": target} # encode them a = DetaImageProcessor() a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) ) @slow def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: # prepare image, target and masks_path a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a = json.loads(f.read() ) a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a = DetaImageProcessor(format="coco_panoptic" ) a = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" ) # verify pixel values a = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase ) a = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) ) # verify area a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) ) # verify boxes a = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase ) a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1e-3 ) ) # verify image_id a = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) ) # verify is_crowd a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) ) # verify class_labels a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) ) # verify masks a = 82_28_73 
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase ) # verify orig_size a = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) ) # verify size a = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
'''simple docstring''' from __future__ import annotations import time import numpy as np __lowerCAmelCase : Dict = [8, 5, 9, 7] __lowerCAmelCase : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __lowerCAmelCase : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> str: a = claim_vector a = allocated_resources_table a = maximum_claim_table def __UpperCAmelCase ( self : Union[str, Any] ) -> str: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self : Optional[int] ) -> Dict: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self : List[str] ) -> List[Any]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self : Tuple ) -> Tuple: return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ) -> Optional[int]: a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack a = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def __UpperCAmelCase ( self : Optional[int] ) -> Any: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
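# Stand-alone sketch of the core bookkeeping in the class above: the need
# matrix is the maximum claim minus the current allocation, and the available
# vector is the claim vector minus the per-resource allocation totals. The
# sample data matches the module-level tables.
import numpy as np

claim_vector = np.array([8, 5, 9, 7])
allocated = np.array(
    [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
)
maximum = np.array(
    [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
)

need = maximum - allocated
available = claim_vector - allocated.sum(axis=0)
print(need)       # per-process outstanding demand
print(available)  # resources currently free for the safety check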
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sorts a list in place with bidirectional bubble passes and returns it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: move the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: move the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
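# Quick checks for the sort above.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]
assert cocktail_shaker_sort([]) == []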
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n using trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # whatever remains after dividing out all smaller factors is prime
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
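# Worked checks: 13195 = 5 * 7 * 13 * 29 (the example from the Project Euler
# statement), and 600851475143 = 71 * 839 * 1471 * 6857.
assert solution(13195) == 29
assert solution(600851475143) == 6857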
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __lowerCAmelCase : Optional[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' __lowerCAmelCase : str = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' __lowerCAmelCase : List[Any] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : int ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[List[List[str]]] , __lowerCamelCase : List[List[str]] , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCamelCase , hypotheses=__lowerCamelCase , min_len=__lowerCamelCase , max_len=__lowerCamelCase ) }
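# The metric above delegates to nltk; a minimal direct call looks like this
# (token lists shortened from the docstring examples). With three of four
# unigrams, one of three bigrams and no longer n-grams matching, the
# aggregate match ratio is 4/10.
from nltk.translate import gleu_score

hyp = ["he", "read", "the", "book"]
ref = ["he", "read", "a", "book"]
score = gleu_score.corpus_gleu(
    list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4
)
print(round(score, 2))  # -> 0.4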
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class snake_case__ (UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = 'trajectory_transformer' SCREAMING_SNAKE_CASE_ : int = ['past_key_values'] SCREAMING_SNAKE_CASE_ : Optional[Any] = { 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Union[str, Any] , __lowerCamelCase : str=1_00 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=1 , __lowerCamelCase : Any=2_49 , __lowerCamelCase : Any=6 , __lowerCamelCase : Optional[int]=17 , __lowerCamelCase : Tuple=25 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Optional[Any]=1_28 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=0.0_006 , __lowerCamelCase : Dict=5_12 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : List[str]=5_02_56 , __lowerCamelCase : List[Any]=5_02_56 , **__lowerCamelCase : str , ) -> Tuple: a = vocab_size a = action_weight a = reward_weight a = value_weight a = max_position_embeddings a = block_size a = action_dim a = observation_dim a = transition_dim a = learning_rate a = n_layer a = n_head a = n_embd a = embd_pdrop a = attn_pdrop a = resid_pdrop a = initializer_range a = layer_norm_eps a = kaiming_initializer_range a = use_cache super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
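# Usage sketch: the class above corresponds to transformers'
# TrajectoryTransformerConfig (an assumption based on the model type string),
# importable in transformers versions that still ship the deprecated model.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig()
assert config.hidden_size == config.n_embd  # attribute_map alias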
import argparse import os import re __lowerCAmelCase : Union[str, Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __lowerCAmelCase : Dict = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings __lowerCAmelCase : Any = re.compile(r'\s*\(\s*"(\S[^"]+)"') def __magic_name__ ( A : int, A : bool = False ): '''simple docstring''' with open(A, "r", encoding="utf-8" ) as f: a = f.read() a = content.split("\n" ) a = [] a = 0 while line_idx < len(A ): if _re_intro_mapping.search(lines[line_idx] ) is not None: a = len(re.search(R"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 a = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": a = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers a = sorted(A, key=lambda A : _re_identifier.search(A ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(A, "w", encoding="utf-8" ) as f: f.write("\n".join(A ) ) elif "\n".join(A ) != content: return True def __magic_name__ ( A : bool = False ): '''simple docstring''' a = [os.path.join(A, A ) for f in os.listdir(A ) if f.endswith(".py" )] a = [sort_auto_mapping(A, overwrite=A ) for fname in fnames] if not overwrite and any(A ): a = [f for f, d in zip(A, A ) if d] raise ValueError( F"""The following files have auto mappings that need sorting: {", ".join(A )}. Run `make style` to fix""" " this." ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __lowerCAmelCase : Optional[Any] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
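# Small demonstration of the identifier regex used above: it captures the
# first quoted name inside a mapping entry, which is the key the blocks are
# sorted by. The sample line is a made-up mapping entry.
import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
line = '        ("albert", "AlbertConfig"),'
print(_re_identifier.search(line).groups()[0])  # -> albert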
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class snake_case__ (__A ): """simple docstring""" @require_torch def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' a = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__lowerCamelCase ) BertModel.from_pretrained(__lowerCamelCase ) BertTokenizer.from_pretrained(__lowerCamelCase ) pipeline(task="fill-mask" , model=__lowerCamelCase ) # baseline - just load from_pretrained with normal network a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed a = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a = '''1''' a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __UpperCAmelCase ( self : str ) -> List[str]: a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__lowerCamelCase ) BertModel.from_pretrained(__lowerCamelCase ) BertTokenizer.from_pretrained(__lowerCamelCase ) pipeline(task="fill-mask" , model=__lowerCamelCase ) # baseline - just load from_pretrained with normal network a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed a = self.get_env() a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __UpperCAmelCase ( self : Any ) -> Optional[Any]: a = ''' from transformers import BertConfig, BertModel, BertTokenizer ''' a = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' a = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed a = self.get_env() a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # 
next emulate no network a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a = '''1''' a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: a = ''' from transformers import pipeline ''' a = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' a = self.get_env() a = '''1''' a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def __UpperCAmelCase ( self : int ) -> Optional[int]: a = ''' from transformers import AutoModel ''' a = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed a = self.get_env() a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a = '''1''' a = subprocess.run(__lowerCamelCase , env=__lowerCamelCase , check=__lowerCamelCase , capture_output=__lowerCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
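# The tests above simulate loss of connectivity by monkey-patching the socket
# constructor; the same trick works in a few lines outside the test harness.
import socket


def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, network access is blocked")


socket.socket = offline_socket  # any subsequent connection attempt now raises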
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = '▁' __lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'} __lowerCAmelCase : int = { 'vocab_file': { 'google/reformer-crime-and-punishment': ( 'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model' ) } } __lowerCAmelCase : Any = { 'google/reformer-crime-and-punishment': 52_4288, } class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : int = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<unk>" , __lowerCamelCase : Dict=[] , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ) -> None: a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def __UpperCAmelCase ( self : Optional[int] ) -> int: return self.sp_model.get_piece_size() def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]: a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Optional[Any]: a = self.__dict__.copy() a = None return state def __setstate__( self : str , __lowerCamelCase : Tuple ) -> List[Any]: a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self : int , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Dict ) -> Any: return self.sp_model.piece_to_id(__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[str, Any] ) -> str: if index < self.sp_model.get_piece_size(): a = self.sp_model.IdToPiece(__lowerCamelCase ) return token def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> List[Any]: a = [] a = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token a = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def __UpperCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and 
os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
662
0
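The offline-mode tests in the row above all follow one pattern: build a small script from load/mock/run fragments, monkeypatch socket.socket inside a child interpreter so every network call fails, and assert on the child's exit code. A minimal self-contained sketch of that pattern, with print('success') standing in (as an assumption) for the real from_pretrained() call:

import subprocess
import sys

# Fragments joined into one child program, mirroring the load/mock/run strings above.
load = "import socket"
mock = (
    "def offline_socket(*args, **kwargs):\n"
    "    raise OSError('Offline mode is enabled')\n"
    "socket.socket = offline_socket"
)
run = "print('success')"  # hypothetical stand-in for the guarded library call

cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
result = subprocess.run(cmd, check=False, capture_output=True)
assert result.returncode == 0, result.stderr
assert "success" in result.stdout.decode()

Because the child only prints, it exits 0 even with networking disabled; swapping run for a remote download would flip the exit code to non-zero, which is exactly what the negative tests above assert.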
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : Optional[Any] = None # sigma(t_i) @classmethod def __UpperCAmelCase ( cls : Tuple ) -> Tuple: return cls() @dataclass class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = 42 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 42 SCREAMING_SNAKE_CASE_ : int = 42 class snake_case__ (_UpperCamelCase , _UpperCamelCase ): """simple docstring""" @property def __UpperCAmelCase ( self : Tuple ) -> Tuple: return True @register_to_config def __init__( self : Optional[int] , __lowerCamelCase : Tuple = 0.02 , __lowerCamelCase : Dict = 1_00 , __lowerCamelCase : Optional[int] = 1.007 , __lowerCamelCase : int = 80 , __lowerCamelCase : Any = 0.05 , __lowerCamelCase : int = 50 , ) -> Union[str, Any]: pass def __UpperCAmelCase ( self : Tuple ) -> Dict: return KarrasVeSchedulerState.create() def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[str] = () ) -> Optional[Any]: a = jnp.arange(0 , _lowerCAmelCase )[::-1].copy() a = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_lowerCAmelCase , schedule=jnp.array(_lowerCAmelCase , dtype=jnp.floataa ) , timesteps=_lowerCAmelCase , ) def __UpperCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , ) -> Optional[Any]: if self.config.s_min <= sigma <= self.config.s_max: a = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: a = 0 # sample eps ~ N(0, S_noise^2 * I) a = random.split(_lowerCAmelCase , num=1 ) a = self.config.s_noise * random.normal(key=_lowerCAmelCase , shape=sample.shape ) a = sigma + gamma * sigma a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] = True , ) -> Any: a = sample_hat + sigma_hat * model_output a = (sample_hat - pred_original_sample) / sigma_hat a = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] = True , ) -> Dict: a = sample_prev + sigma_prev * model_output a = (sample_prev - pred_original_sample) / sigma_prev a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return 
FlaxKarrasVeOutput(prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , state=_lowerCAmelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Optional[int]: raise NotImplementedError()
709
from __future__ import annotations import time import numpy as np __lowerCAmelCase : List[str] = [8, 5, 9, 7] __lowerCAmelCase : str = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __lowerCAmelCase : Optional[Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : """simple docstring""" def __init__( self : Any , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ) -> None: a = claim_vector a = allocated_resources_table a = maximum_claim_table def __UpperCAmelCase ( self : List[str] ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __UpperCAmelCase ( self : str ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __UpperCAmelCase ( self : Dict ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __UpperCAmelCase ( self : Dict ) -> dict[int, list[int]]: return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Any ) -> None: a = self.__need() a = self.__allocated_resources_table a = self.__available_resources() a = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: a = False for each_need in need_list: a = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: a = False break if execution: a = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: a = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack a = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def __UpperCAmelCase ( self : Any ) -> str: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}""" + " ".join(f"""{it:>8}""" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
662
0
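The scheduler in the row above implements a Karras-style variance-exploding sampler; its core is the "churn" step that lifts the noise level from sigma to sigma_hat before the ODE update. A hedged NumPy sketch of that step only (the constants are illustrative, not the library's defaults, and the s_min <= sigma <= s_max gate on gamma is folded into a comment):

import numpy as np

def add_noise_to_input(sample, sigma, s_churn, num_steps, s_noise, rng):
    # In the full scheduler, gamma is only nonzero when s_min <= sigma <= s_max.
    gamma = min(s_churn / num_steps, 2 ** 0.5 - 1)
    eps = s_noise * rng.standard_normal(sample.shape)  # eps ~ N(0, s_noise^2 * I)
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat ** 2 - sigma ** 2) ** 0.5) * eps
    return sample_hat, sigma_hat

rng = np.random.default_rng(0)
sample_hat, sigma_hat = add_noise_to_input(
    np.zeros(4), sigma=1.0, s_churn=80, num_steps=50, s_noise=1.007, rng=rng
)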
from abc import ABC, abstractmethod from typing import List, Optional class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : List[str] ) -> Union[str, Any]: # test for the above condition self.test() def __UpperCAmelCase ( self : Any ) -> str: a = 0 a = False while not completed: if counter == 1: self.reset() a = self.advance() if not self.does_advance(UpperCamelCase_ ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) a = self.update(UpperCamelCase_ ) counter += 1 if counter > 1_00_00: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def __UpperCAmelCase ( self : List[str] ) -> str: raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple ) -> Optional[Any]: raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] ) -> Dict: raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __UpperCAmelCase ( self : Optional[Any] ) -> Any: raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]: raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any]=False ) -> Union[str, Any]: raise NotImplementedError( f"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : List[Any] , __lowerCamelCase : int ) -> List[str]: super(UpperCamelCase_ , self ).__init__() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0: raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) a = token_ids a = len(self.token_ids ) a = -1 # the index of the currently fulfilled step a = False def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ) -> Dict: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int ) -> Union[str, Any]: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" ) a = False a = False a = False if self.does_advance(UpperCamelCase_ ): self.fulfilled_idx += 1 a = True if self.fulfilled_idx == (self.seqlen - 1): a = True a = completed else: # failed to make progress. a = True self.reset() return stepped, completed, reset def __UpperCAmelCase ( self : List[Any] ) -> List[str]: a = False a = 0 def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: return self.seqlen - (self.fulfilled_idx + 1) def __UpperCAmelCase ( self : Any , __lowerCamelCase : str=False ) -> List[Any]: a = PhrasalConstraint(self.token_ids ) if stateful: a = self.seqlen a = self.fulfilled_idx a = self.completed return new_constraint class snake_case__ : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]=True ) -> str: a = max([len(UpperCamelCase_ ) for one in nested_token_ids] ) a = {} for token_ids in nested_token_ids: a = root for tidx, token_id in enumerate(UpperCamelCase_ ): if token_id not in level: a = {} a = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f""" {nested_token_ids}.""" ) a = root def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ) -> Optional[int]: a = self.trie for current_token in current_seq: a = start[current_token] a = list(start.keys() ) return next_tokens def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]: a = self.next_tokens(UpperCamelCase_ ) return len(UpperCamelCase_ ) == 0 def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Dict ) -> Tuple: a = list(root.values() ) if len(UpperCamelCase_ ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> str: a = self.count_leaves(UpperCamelCase_ ) return len(UpperCamelCase_ ) != leaf_count class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( 
self : Dict , __lowerCamelCase : Any ) -> int: super(UpperCamelCase_ , self ).__init__() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0: raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ): raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) a = DisjunctiveTrie(UpperCamelCase_ ) a = nested_token_ids a = self.trie.max_height a = [] a = False def __UpperCAmelCase ( self : List[str] ) -> Dict: a = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase_ ) == 0: return None else: return token_list def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[str] ) -> int: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" ) a = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Dict ) -> Optional[int]: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" ) a = False a = False a = False if self.does_advance(UpperCamelCase_ ): self.current_seq.append(UpperCamelCase_ ) a = True else: a = True self.reset() a = self.trie.reached_leaf(self.current_seq ) a = completed return stepped, completed, reset def __UpperCAmelCase ( self : Any ) -> Optional[Any]: a = False a = [] def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __UpperCAmelCase ( self : Any , __lowerCamelCase : Optional[Any]=False ) -> List[str]: a = DisjunctiveConstraint(self.token_ids ) if stateful: a = self.seqlen a = self.current_seq a = self.completed return new_constraint class snake_case__ : """simple docstring""" def __init__( self : Optional[int] , __lowerCamelCase : Dict ) -> Optional[int]: a = constraints # max # of steps required to fulfill a given constraint a = max([c.seqlen for c in constraints] ) a = len(UpperCamelCase_ ) a = False self.init_state() def __UpperCAmelCase ( self : str ) -> Union[str, Any]: a = [] a = None a = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints] def __UpperCAmelCase ( self : List[str] ) -> int: a = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __UpperCAmelCase ( self : Tuple ) -> Dict: a = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" a = constraint.advance() if isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.append(UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.extend(UpperCamelCase_ ) else: a = self.inprogress_constraint.advance() if isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.append(UpperCamelCase_ ) elif 
isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.extend(UpperCamelCase_ ) if len(UpperCamelCase_ ) == 0: return None else: return token_list def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple ) -> Optional[int]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint a = self.add(UpperCamelCase_ ) # the entire list of constraints are fulfilled if self.completed: break def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Tuple ) -> Dict: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" ) a = False, False if self.completed: a = True a = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state a = self.inprogress_constraint.update(UpperCamelCase_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) ) a = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) a = None if len(self.pending_constraints ) == 0: # we're done! a = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase_ ): a = pending_constraint.update(UpperCamelCase_ ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase_ ) a = None if not complete and stepped: a = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". a = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. a = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any]=True ) -> Dict: a = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: a = [ constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: a = self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) a = [constraint.copy() for constraint in self.pending_constraints] return new_state
710
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowerCAmelCase : List[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = TypeVar('DatasetType', Dataset, IterableDataset) def __magic_name__ ( A : List[DatasetType], A : Optional[List[float]] = None, A : Optional[int] = None, A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) else: return _interleave_iterable_datasets( A, A, A, info=A, split=A, stopping_strategy=A ) def __magic_name__ ( A : List[DatasetType], A : Optional[DatasetInfo] = None, A : Optional[NamedSplit] = None, A : int = 0, ): '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(A ): if not isinstance(A, (Dataset, IterableDataset) ): if isinstance(A, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(A )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" ) if i == 0: a , a = ( (Dataset, IterableDataset) if isinstance(A, A ) else (IterableDataset, Dataset) ) elif not isinstance(A, A ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A, info=A, split=A, axis=A ) else: return _concatenate_iterable_datasets(A, info=A, split=A, axis=A )
662
0
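The disjunctive constraint in the row above reduces to bookkeeping over a trie of token-id sequences. A minimal sketch of that data structure (function names are chosen for illustration and are not the library's API):

def build_trie(nested_token_ids):
    # Each allowed token sequence becomes one root-to-leaf path of nested dicts.
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root

def next_tokens(trie, current_seq):
    # Walk the fulfilled prefix, then report which token ids may come next.
    level = trie
    for token_id in current_seq:
        level = level[token_id]
    return sorted(level)

trie = build_trie([[1, 2, 3], [1, 2, 4], [5]])
assert next_tokens(trie, []) == [1, 5]
assert next_tokens(trie, [1, 2]) == [3, 4]
assert next_tokens(trie, [1, 2, 3]) == []  # empty means a leaf: constraint fulfilled

The no-subsets check above falls out of the same structure: a sequence that is a strict prefix of another ends at an internal node rather than a leaf, so counting leaves under each inserted sequence detects it.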
import math def is_prime ( number : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(number ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( ratio : float = 0.1 ): '''simple docstring''' primes = 3 j = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(i ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
711
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : List[str] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } __lowerCAmelCase : Any = '▁' class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = BigBirdTokenizer SCREAMING_SNAKE_CASE_ : str = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ : List[int] = [] def __init__( self : int , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Tuple="<pad>" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Tuple="[CLS]" , **__lowerCamelCase : Optional[Any] , ) -> List[Any]: a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) a = vocab_file a = False if not self.vocab_file else True def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
662
0
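The last row's code is a spiral-diagonal prime-density search in the Project Euler style. Each ring that grows the square's side from j to j + 2 adds four corners, of which the largest, (j + 2)**2, is a perfect square and never prime, so only three candidates need testing. A short sketch of that corner arithmetic, matching the range(j * j + j + 1, (j + 2) * (j + 2), j + 1) expression above:

def ring_corners(j):
    # Non-square corners added when the spiral side grows from j to j + 2.
    return list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1))

assert ring_corners(1) == [3, 5, 7]     # first ring around the centre 1 (9 is skipped)
assert ring_corners(3) == [13, 17, 21]  # second ring (25 is skipped)

A square of side j has 2 * j - 1 values on its diagonals (the centre plus four per ring), which is the denominator in the primes / (2 * j - 1) ratio test.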