Columns:

    code                      string   lengths 82 – 54.1k
    code_codestyle            int64    0 – 699
    style_context             string   lengths 111 – 35.6k
    style_context_codestyle   int64    0 – 699
    label                     int64    0 – 1

import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCamelCase__ : Optional[int] = logging.get_logger(__name__) lowerCamelCase__ : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ : Tuple = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ : Optional[Any] = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ : Tuple = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2, """facebook/dpr-ctx_encoder-multiset-base""": 5_1_2, } lowerCamelCase__ : Optional[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_1_2, """facebook/dpr-question_encoder-multiset-base""": 5_1_2, } lowerCamelCase__ : Dict = { """facebook/dpr-reader-single-nq-base""": 5_1_2, """facebook/dpr-reader-multiset-base""": 5_1_2, } lowerCamelCase__ : Dict = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCamelCase__ : Any = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCamelCase__ : List[str] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class _snake_case ( UpperCAmelCase_ ): 
__lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES __lowerCAmelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Any = DPRContextEncoderTokenizer class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES __lowerCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : List[str] = DPRQuestionEncoderTokenizer lowerCamelCase__ : Any = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) lowerCamelCase__ : int = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) lowerCamelCase__ : Optional[int] = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase_ ) class _snake_case : def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) elif titles is None or texts is None: lowercase__ : Union[str, Any] = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[int] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [titles] lowercase__ : List[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [texts] lowercase__ : int = len(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [questions] * n_passages assert len(SCREAMING_SNAKE_CASE_) == len( SCREAMING_SNAKE_CASE_), f'There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_)} titles and {len(SCREAMING_SNAKE_CASE_)} texts.' 
lowercase__ : Dict = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)["""input_ids"""] lowercase__ : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)["""input_ids"""] lowercase__ : Union[str, Any] = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) ] } if return_attention_mask is not False: lowercase__ : Optional[int] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) lowercase__ : List[str] = attention_mask return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 4 , ): '''simple docstring''' lowercase__ : Dict = reader_input["""input_ids"""] lowercase__ , lowercase__ , lowercase__ : str = reader_output[:3] lowercase__ : List[Any] = len(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__) lowercase__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowercase__ : Dict = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence lowercase__ : Optional[Any] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowercase__ : int = sequence_ids.index(self.pad_token_id) else: lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_) lowercase__ : int = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(SCREAMING_SNAKE_CASE_) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Tuple = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) lowercase__ : Optional[Any] = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x[1] , reverse=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' lowercase__ : Dict = end_index - start_index + 1 assert length <= 
max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(SCREAMING_SNAKE_CASE_) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCAmelCase : int = VOCAB_FILES_NAMES __lowerCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Any = READER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Optional[Any] = ['input_ids', 'attention_mask'] __lowerCAmelCase : int = DPRReaderTokenizer
code_codestyle: 12
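The long docstring in the sample above specifies the reader tokenizer's call contract. A minimal sketch of that contract, assuming the stock `transformers` `DPRReaderTokenizerFast` class and the checkpoint named in the sample's pretrained maps (the sample itself defines renamed equivalents):

from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",  # one question, duplicated per passage
    titles=["Paris", "Lyon"],
    texts=["Paris is the capital of France.", "Lyon is a city in France."],
    padding=True,
    return_tensors="pt",
)
# input_ids has shape (n_passages, sequence_length), laid out as
# [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>
print(encoded["input_ids"].shape)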
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
style_context_codestyle: 12
label: 1
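The conversion script in the `style_context` above is driven by argparse; a hedged sketch of invoking it programmatically instead. The paths are placeholders, and `convert_flava_checkpoint` is the name used at the script's own call site (the obfuscated `def` above binds a different name):

# Placeholder paths; real FLAVA checkpoint/codebook files are assumptions.
convert_flava_checkpoint(
    "./flava_full.pt",      # checkpoint_path (local file or URL)
    "./flava_codebook.pt",  # codebook_path, converted via convert_dalle_checkpoint
    "./flava-hf",           # pytorch_dump_folder_path
    None,                   # config_path (None falls back to a default FlavaConfig)
)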
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class _snake_case ( UpperCAmelCase_ ):
    __lowerCAmelCase : str = 'philschmid/bart-large-cnn-samsum'
    __lowerCAmelCase : List[str] = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    __lowerCAmelCase : List[Any] = 'summarizer'
    __lowerCAmelCase : List[str] = AutoTokenizer
    __lowerCAmelCase : int = AutoModelForSeqaSeqLM
    __lowerCAmelCase : Tuple = ['text']
    __lowerCAmelCase : Union[str, Any] = ['text']

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.pre_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , truncation=SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.model.generate(**SCREAMING_SNAKE_CASE_)[0]

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.pre_processor.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_)
code_codestyle: 12
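If the class above corresponds to the summarization tool shipped with the `transformers` agents API (its `name` attribute is 'summarizer'), it would be loaded by task name. A sketch, assuming a transformers version that still ships `load_tool`:

from transformers import load_tool  # agents/tools API; availability depends on version (assumption)

summarizer = load_tool("summarization")
print(summarizer("A long English text that we want condensed into a few sentences..."))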
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
style_context_codestyle: 12
label: 1
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _snake_case ( unittest.TestCase ): @property def lowercase__ ( self): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ort.SessionOptions() lowercase__ : Optional[Any] = False return options def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""") lowercase__ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""") lowercase__ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Any = """A red cat sitting on a park bench""" lowercase__ : List[str] = np.random.RandomState(0) lowercase__ : Tuple = pipe( prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , ) lowercase__ : Dict = output.images lowercase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) lowercase__ : List[str] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""") lowercase__ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""") lowercase__ : Union[str, Any] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""") lowercase__ : Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : str = """A red cat sitting on a park bench""" lowercase__ : Optional[int] = np.random.RandomState(0) lowercase__ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=20 , 
generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , ) lowercase__ : Optional[int] = output.images lowercase__ : Tuple = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) lowercase__ : Union[str, Any] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
code_codestyle: 12
lowerCamelCase__ : dict[tuple[int, int, int], int] = {}


def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    lowercase__ : Tuple = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 )
    lowercase__ : List[str] = state_late + state_absent + state_ontime
    lowercase__ : List[Any] = prizestrings
    return prizestrings


def UpperCamelCase ( lowercase_ = 30 ) -> int:
    '''simple docstring'''
    return _calculate(lowercase_ , absent=0 , late=0 )


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 12
label: 1
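Under the rules encoded in the memoized recursion above (a string fails on three consecutive late days or on a second absence), a 4-day period admits 43 prize strings out of the 3^4 = 81 possible trinary strings, which matches the worked example in Project Euler problem 191. An illustrative sanity check, assuming the solver is bound to `solution`, the name used in the sample's `__main__` guard (the obfuscated `def` above binds a different name):

assert solution(4) == 43  # the 4-day worked example in Project Euler 191
assert solution(0) == 1   # the empty attendance record is trivially prize-winning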
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], 
max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : 
Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning 
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
code_codestyle: 12
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class _snake_case ( nn.Module ): def __init__( self): '''simple docstring''' super().__init__() lowercase__ : Optional[Any] = nn.Linear(3 , 4) lowercase__ : Union[str, Any] = nn.BatchNormad(4) lowercase__ : str = nn.Linear(4 , 5) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_))) class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowercase__ , lowercase__ : int = mock_training_loop_function("""hello""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, """hello"""]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function(1_28 , """hello""" , """world""") self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0]) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): raise ValueError("""Oops, we had an error!""") with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0]) @require_cuda def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch.cuda.memory_allocated() lowercase__ : str = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = release_memory(SCREAMING_SNAKE_CASE_) 
self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_)
style_context_codestyle: 12
label: 1
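The tests above exercise `find_executable_batch_size` from accelerate. A minimal sketch of the intended usage pattern: the decorator injects `batch_size` into the wrapped function and retries with the value halved after each CUDA out-of-memory error, until the body succeeds or the size reaches zero:

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # build the dataloader / run the epoch with `batch_size` here;
    # on an OOM error the decorator retries with batch_size // 2
    print(f"trying batch_size={batch_size}")
    return batch_size

training_loop()  # called without arguments; the decorator supplies batch_size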
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCamelCase__ : str = None lowerCamelCase__ : int = logging.get_logger(__name__) lowerCamelCase__ : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ : List[str] = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ : Union[str, Any] = { """facebook/nllb-large-en-ro""": 1_0_2_4, """facebook/nllb-200-distilled-600M""": 1_0_2_4, } # fmt: off lowerCamelCase__ : Tuple = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", 
"""shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : List[str] = VOCAB_FILES_NAMES __lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Tuple = ['input_ids', 'attention_mask'] __lowerCAmelCase : List[Any] = NllbTokenizer __lowerCAmelCase : List[int] = [] __lowerCAmelCase : List[int] = [] def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else mask_token lowercase__ : Optional[int] = legacy_behaviour super().__init__( vocab_file=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = vocab_file lowercase__ : Any = False if not self.vocab_file else True lowercase__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens}) lowercase__ : Optional[Any] = { lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowercase__ : int = src_lang if src_lang is not None else """eng_Latn""" lowercase__ : Tuple = self.convert_tokens_to_ids(self._src_lang) lowercase__ : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def lowercase__ ( self): '''simple docstring''' return self._src_lang @src_lang.setter def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' lowercase__ : Optional[int] = [self.sep_token_id] lowercase__ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""") lowercase__ : Tuple = src_lang lowercase__ : Any = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tgt_lang_id return inputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "eng_Latn" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fra_Latn" , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Optional[int] = src_lang lowercase__ : List[str] = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang) def lowercase__ ( self): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) if self.legacy_behaviour: lowercase__ : Optional[Any] = [] lowercase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] else: lowercase__ : Optional[Any] = [self.cur_lang_code] lowercase__ : Tuple = [self.eos_token_id] lowercase__ : int = self.convert_ids_to_tokens(self.prefix_tokens) lowercase__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens) lowercase__ : str = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): 
'''simple docstring''' lowercase__ : int = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) if self.legacy_behaviour: lowercase__ : Dict = [] lowercase__ : Dict = [self.eos_token_id, self.cur_lang_code] else: lowercase__ : Tuple = [self.cur_lang_code] lowercase__ : str = [self.eos_token_id] lowercase__ : Dict = self.convert_ids_to_tokens(self.prefix_tokens) lowercase__ : int = self.convert_ids_to_tokens(self.suffix_tokens) lowercase__ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""") if not os.path.isdir(SCREAMING_SNAKE_CASE_): logger.error(f'Vocabulary path ({save_directory}) should be a directory.') return lowercase__ : List[Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE_): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_) return (out_vocab_file,)
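For context, a minimal usage sketch of the fast NLLB tokenizer defined above. The checkpoint name is an assumption (the published facebook/nllb-200-distilled-600M weights), not something this file pins down:

from transformers import AutoTokenizer

# src_lang / tgt_lang must be codes from the FAIRSEQ_LANGUAGE_CODES list above.
tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("Hello world", return_tensors="pt")
# set_src_lang_special_tokens() places the source language code before the text
# (after the eos token instead when legacy_behaviour is set).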
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = 
name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 
10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
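A sketch of how this conversion entry point is typically invoked; the script filename is illustrative, while the URL is the parser's default:

# python convert_swin2sr_original_to_pytorch.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#     --push_to_hub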
import numpy as np def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ = 1E-12 , lowercase_ = 1_00 , ) -> tuple[float, np.ndarray]: '''simple docstring''' assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1] # Ensure proper dimensionality. assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ ) lowercase__ : Dict = np.iscomplexobj(lowercase_ ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(lowercase_ , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. lowercase__ : Optional[Any] = False lowercase__ : Union[str, Any] = 0 lowercase__ : Tuple = 0 lowercase__ : Dict = 1E12 while not convergence: # Multiple matrix by the vector. lowercase__ : Tuple = np.dot(lowercase_ , lowercase_ ) # Normalize the resulting output vector. lowercase__ : int = w / np.linalg.norm(lowercase_ ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) lowercase__ : List[Any] = vector.conj().T if is_complex else vector.T lowercase__ : Optional[Any] = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) ) # Check convergence. lowercase__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: lowercase__ : Any = True lowercase__ : List[str] = lambda_ if is_complex: lowercase__ : Tuple = np.real(lambda_ ) return lambda_, vector def UpperCamelCase ( ) -> None: '''simple docstring''' lowercase__ : Dict = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) lowercase__ : Any = np.array([41, 4, 20] ) lowercase__ : Optional[int] = real_input_matrix.astype(np.complexaaa ) lowercase__ : Tuple = np.triu(1J * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T lowercase__ : Any = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": lowercase__ : Tuple = real_input_matrix lowercase__ : Optional[Any] = real_vector elif problem_type == "complex": lowercase__ : Optional[int] = complex_input_matrix lowercase__ : int = complex_vector # Our implementation. lowercase__ , lowercase__ : Tuple = power_iteration(lowercase_ , lowercase_ ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). lowercase__ , lowercase__ : Dict = np.linalg.eigh(lowercase_ ) # Last eigenvalue is the maximum one. lowercase__ : List[Any] = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. lowercase__ : Any = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
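For reference, the loop above implements the standard power-iteration recurrence with a Rayleigh-quotient estimate of the dominant eigenvalue:

$$
v_{k+1} = \frac{A v_k}{\lVert A v_k \rVert}, \qquad
\lambda_{k+1} = v_{k+1}^{*} A v_{k+1}, \qquad
\text{stop when } \frac{\lvert \lambda_{k+1} - \lambda_k \rvert}{\lambda_{k+1}} \le \varepsilon .
$$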
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": 
jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir 
is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... 
""") self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
lowerCamelCase__ : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
def solution(length: int = 50) -> int:
    '''simple docstring'''
    # Function and variable names restored from their call/use sites below;
    # the dump had masked the assignment targets.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
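A quick sanity check against the problem's worked example: a row of length five admits 7 + 3 + 2 = 12 tilings.

assert solution(5) == 12  # 7 placements of a length-2 oblong, 3 of length-3, 2 of length-4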
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], 
max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : 
Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning 
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)


class _snake_case ( UpperCAmelCase_ ):
    def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" ,
            SCREAMING_SNAKE_CASE_ ,
        )
        super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case ( UpperCAmelCase_ ):
    __lowerCAmelCase : Any = ['image_processor', 'tokenizer']
    __lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor'
    __lowerCAmelCase : int = 'AutoTokenizer'

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
        lowercase__ : Union[str, Any] = self.image_processor

    def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
        if images is not None:
            lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
        if text is not None and images is not None:
            lowercase__ : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_) , tensor_type=SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)

    @property
    def lowercase__ ( self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
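A hedged sketch of the call contract implemented above (the concrete class name is masked in this dump; VisionTextDualEncoderProcessor in transformers has the same shape):

# enc = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# -> a BatchEncoding carrying "input_ids", "attention_mask" and "pixel_values",
#    i.e. exactly the model_input_names property at the end of the class.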
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=UpperCAmelCase_)
class _snake_case ( UpperCAmelCase_ ):
    __lowerCAmelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
    __lowerCAmelCase : ClassVar[Features] = Features({'audio': Audio()})
    __lowerCAmelCase : ClassVar[Features] = Features({'labels': ClassLabel})
    __lowerCAmelCase : str = "audio"
    __lowerCAmelCase : str = "labels"

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column] , SCREAMING_SNAKE_CASE_):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        lowercase__ : Union[str, Any] = copy.deepcopy(self)
        lowercase__ : Optional[int] = self.label_schema.copy()
        lowercase__ : Tuple = features[self.label_column]
        lowercase__ : Optional[Any] = label_schema
        return task_template

    @property
    def lowercase__ ( self):
        '''simple docstring'''
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
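A sketch of aligning such a template with a concrete dataset schema; the class and method names are masked above (in datasets this corresponds to AudioClassification.align_with_features):

from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
# aligned = template.align_with_features(features)  # `template` is a hypothetical instance
# The aligned copy swaps the dataset's real ClassLabel into the label schema, so the
# column-mapping property can route {"audio": "audio", "labels": "labels"}.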
def fibonacci(n: int) -> int:
    '''simple docstring'''
    # Function and variable names restored from their call/use sites below;
    # the dump had masked the definitions.
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
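A sanity check for the index helper: the first Fibonacci number with three digits is F(12) = 144.

assert fibonacci_digits_index(3) == 12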
from __future__ import annotations


def UpperCamelCase ( lowercase_ ) -> bool:
    '''simple docstring'''
    return len(set(lowercase_)) == len(lowercase_)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set.""" def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any: '''simple docstring''' lowercase__ : Any = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowercase__ : int = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowercase__ : Dict = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any = torch.cuda.device_count() lowercase__ : Any = num_gpus lowercase__ : Optional[int] = False if num_gpus > 1: lowercase__ : Tuple = """MULTI_GPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_xpu_available() and use_xpu: lowercase__ : Union[str, Any] = torch.xpu.device_count() lowercase__ : str = num_xpus lowercase__ : List[Any] = False if num_xpus > 1: lowercase__ : str = """MULTI_XPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_npu_available(): lowercase__ : Tuple = torch.npu.device_count() lowercase__ : Union[str, Any] = num_npus lowercase__ : Union[str, Any] = False if num_npus > 1: lowercase__ : List[Any] = """MULTI_NPU""" else: lowercase__ : int = """NO""" else: lowercase__ : Union[str, Any] = 0 lowercase__ : str = True lowercase__ : Union[str, Any] = 1 lowercase__ : int = """NO""" lowercase__ : Tuple = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ ) parser.add_argument( """--config_file""" , default=lowercase_ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=lowercase_ ) return parser def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Union[str, Any] = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = 'convbert' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : Dict = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Tuple = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Dict = layer_norm_eps lowercase__ : Tuple = embedding_size lowercase__ : List[str] = head_ratio lowercase__ : Dict = conv_kernel_size lowercase__ : Dict = num_groups lowercase__ : int = classifier_dropout class _snake_case ( UpperCAmelCase_ ): @property def lowercase__ ( self): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ])
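A minimal usage sketch for the configuration class above, following the standard transformers config-then-model pattern (assumes a transformers build with ConvBERT support):

from transformers import ConvBertConfig, ConvBertModel

# Initializing a ConvBERT configuration with default (conv-bert-base style) values.
configuration = ConvBertConfig()

# Initializing a randomly weighted model from that configuration.
model = ConvBertModel(configuration)
print(model.config.hidden_size)  # 768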
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : Optional[int] = logging.get_logger(__name__) lowerCamelCase__ : List[str] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""} class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = 'ctrl' __lowerCAmelCase : Optional[int] = ['past_key_values'] __lowerCAmelCase : Optional[Any] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=24_65_34 , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=12_80 , SCREAMING_SNAKE_CASE_=81_92 , SCREAMING_SNAKE_CASE_=48 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1E-6 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Optional[Any] = vocab_size lowercase__ : Tuple = n_positions lowercase__ : Tuple = n_embd lowercase__ : str = n_layer lowercase__ : int = n_head lowercase__ : List[Any] = dff lowercase__ : str = resid_pdrop lowercase__ : str = embd_pdrop lowercase__ : List[str] = layer_norm_epsilon lowercase__ : Union[str, Any] = initializer_range lowercase__ : Union[str, Any] = use_cache super().__init__(**SCREAMING_SNAKE_CASE_)
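A minimal sketch instantiating the CTRL configuration above (assumes transformers; the model is randomly initialized, not pretrained):

from transformers import CTRLConfig, CTRLModel

configuration = CTRLConfig()      # defaults: n_embd=1280, n_layer=48, n_head=16
model = CTRLModel(configuration)  # random weights, not the pretrained checkpoint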
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__) class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ): __lowerCAmelCase : bool = None __lowerCAmelCase : bool = None class _snake_case ( folder_based_builder.FolderBasedBuilder ): __lowerCAmelCase : Optional[Any] = datasets.Audio() __lowerCAmelCase : Union[str, Any] = 'audio' __lowerCAmelCase : str = AudioFolderConfig __lowerCAmelCase : List[str] # definition at the bottom of the script __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' ) lowerCamelCase__ : int = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] lowerCamelCase__ : int = AUDIO_EXTENSIONS
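The builder above backs the `audiofolder` loader in datasets; a sketch of the typical entry point (the directory path is a placeholder; the folder is expected to contain audio files in the supported extensions, optionally with a `metadata.csv`):

from datasets import load_dataset

# Labels can be inferred from sub-directory names when drop_labels is not set.
dataset = load_dataset("audiofolder", data_dir="/path/to/audio/folder")
print(dataset["train"][0]["audio"])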
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the smallest odd divisor coprime to 10 whose least divisible repunit exceeds `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
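As a quick check of `least_divisible_repunit`: 7 is coprime to 10 and divides the six-digit repunit 111111 (7 × 15873), so the function returns 6.

print(least_divisible_repunit(7))   # 6, since 111111 = 7 * 15873
print(least_divisible_repunit(3))   # 3, since 111 = 3 * 37
print(least_divisible_repunit(10))  # 0, not coprime to 10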
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = (DDPMScheduler,) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = { """num_train_timesteps""": 10_00, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**SCREAMING_SNAKE_CASE_) return config def lowercase__ ( self): '''simple docstring''' for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : Union[str, Any] = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5 def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = len(SCREAMING_SNAKE_CASE_) lowercase__ : Any = self.dummy_model() lowercase__ : List[Any] = self.dummy_sample_deter lowercase__ : str = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. 
predict previous mean of sample x_t-1 lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : str = pred_prev_sample lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2 assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""") lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter lowercase__ : int = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. predict previous mean of sample x_t-1 lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : Tuple = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2 assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE_): if i == len(SCREAMING_SNAKE_CASE_) - 1: lowercase__ : Optional[int] = -1 else: lowercase__ : Tuple = timesteps[i + 1] lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_) lowercase__ : int = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [1_00, 87, 50, 51, 0] with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = [1_00, 87, 50, 1, 0] lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_) with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple 
docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : str = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
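A minimal sketch of the reverse-diffusion loop these tests exercise (assumes diffusers and torch; the zero tensor stands in for a real denoising model's output):

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t) output
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)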
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCamelCase__ : int = logging.get_logger(__name__) lowerCamelCase__ : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ : int = { """vocab_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt""" ), """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""", """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli""": ( """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ : Dict = { """squeezebert/squeezebert-uncased""": 5_1_2, """squeezebert/squeezebert-mnli""": 5_1_2, """squeezebert/squeezebert-mnli-headless""": 5_1_2, } lowerCamelCase__ : List[str] = { """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True}, } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES __lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Dict = SqueezeBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("""lowercase""" , SCREAMING_SNAKE_CASE_) != do_lower_case or normalizer_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE_) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , SCREAMING_SNAKE_CASE_) != tokenize_chinese_chars ): lowercase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop("""type""")) lowercase__ : Optional[Any] = do_lower_case lowercase__ : List[Any] = strip_accents lowercase__ : List[str] = tokenize_chinese_chars lowercase__ : Any = normalizer_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = do_lower_case def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): 
'''simple docstring''' output = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' files = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_) return tuple(files)
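A minimal usage sketch of the fast tokenizer defined above (loads the published checkpoint, so it assumes network access on first use):

from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoding = tokenizer("Hello world", return_token_type_ids=True)
print(encoding["input_ids"])       # [CLS] ... [SEP] ids
print(encoding["token_type_ids"])  # all zeros for a single sequence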
def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # make sure the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
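A quick worked example: for [2, 4, 6, 8] the average is 5, so the deviations are 3, 1, 1, 3 and the mean absolute deviation is 8 / 4 = 2.0.

print(mean_absolute_deviation([2, 4, 6, 8]))  # 2.0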
from manim import * class _snake_case ( UpperCAmelCase_ ): def lowercase__ ( self): '''simple docstring''' lowercase__ : str = Rectangle(height=0.5 , width=0.5) lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0) lowercase__ : List[Any] = [mem.copy() for i in range(6)] lowercase__ : Tuple = [mem.copy() for i in range(6)] lowercase__ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : List[Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Dict = Text("""CPU""" , font_size=24) lowercase__ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) cpu.move_to([-2.5, -0.5, 0]) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : str = [mem.copy() for i in range(1)] lowercase__ : Any = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Optional[int] = Text("""GPU""" , font_size=24) lowercase__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) gpu.set_x(gpu.get_x() - 1) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = [mem.copy() for i in range(6)] lowercase__ : str = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0) lowercase__ : Optional[Any] = Text("""Model""" , font_size=24) lowercase__ : Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_) model.move_to([3, -1.0, 0]) self.play( Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , ) lowercase__ : Optional[Any] = MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' 
, font_size=24 , ) lowercase__ : Any = Square(side_length=2.2) key.move_to([-5, 2, 0]) lowercase__ : Optional[int] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) step_a.move_to([2, 2, 0]) self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5) , Write(SCREAMING_SNAKE_CASE_) , Write(SCREAMING_SNAKE_CASE_)) self.add(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [] lowercase__ : Union[str, Any] = [] lowercase__ : str = [] for i, rect in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7) cpu_target.move_to(SCREAMING_SNAKE_CASE_) cpu_target.generate_target() lowercase__ : Union[str, Any] = 0.4_6 / 4 lowercase__ : Tuple = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0) cpu_targs.append(SCREAMING_SNAKE_CASE_) first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE_)) second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5)) self.play(*SCREAMING_SNAKE_CASE_) self.play(*SCREAMING_SNAKE_CASE_) self.wait()
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = do_rescale lowercase__ : List[Any] = rescale_factor lowercase__ : Tuple = do_pad lowercase__ : Optional[Any] = pad_size def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height lowercase__ : str = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_) if not valid_images(SCREAMING_SNAKE_CASE_): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") # All transformations expect numpy arrays. lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images] if do_pad: lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Dict = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
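The `pad` helper above grows each spatial dimension to the next multiple of `size` using symmetric padding. A quick check of the arithmetic (note the formula adds a full extra block of `size` when the dimension is already a multiple):

size = 8
for old in (13, 16):
    pad = (old // size + 1) * size - old
    print(old, "->", old + pad)  # 13 -> 16, 16 -> 24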
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( UpperCAmelCase_ ): def __init__( self): '''simple docstring''' lowercase__ : List[Any] = [] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_init_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_evaluate""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_predict""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_save""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_log""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_prediction_step""") @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.output_dir) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_) lowercase__ : Any = 
RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_) lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_) return Trainer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_)) # Order doesn't matter lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__) elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_) else: self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = ["""on_init_end""", """on_train_begin"""] lowercase__ : Union[str, Any] = 0 lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader()) lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(SCREAMING_SNAKE_CASE_): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_trainer() lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # Callbacks passed at init are added to the default callbacks lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ 
( self): '''simple docstring''' lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # We can also add, pop, or remove by instance lowercase__ : Union[str, Any] = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # Independent log/save/eval lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""") trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) 
trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowercase__ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
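A minimal sketch of the kind of callback these tests target, hooked into the same events recorded above (assumes transformers; the class name and print format are illustrative):

from transformers import TrainerCallback

class PrintLossCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` carries the metrics dict produced at each logging step.
        if logs is not None and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# Attach with `Trainer(..., callbacks=[PrintLossCallback()])` or `trainer.add_callback(PrintLossCallback())`.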
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu lowerCamelCase__ : Optional[int] = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[Any] = True while ask_again: lowercase__ : Tuple = input(lowercase_ ) try: if default is not None and len(lowercase_ ) == 0: return default return convert_value(lowercase_ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ ) lowercase__ : Any = menu.run(default_choice=lowercase_ ) return convert_value(lowercase_ ) if convert_value is not None else result def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = int(lowercase_ ) return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[str] = int(lowercase_ ) return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] ) def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : str = int(lowercase_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _snake_case ( argparse.RawDescriptionHelpFormatter ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""") return usage
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Dict = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' for attribute in key.split(""".""" ): lowercase__ : Union[str, Any] = getattr(lowercase_ , lowercase_ ) if weight_type is not None: lowercase__ : str = getattr(lowercase_ , lowercase_ ).shape else: lowercase__ : Any = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": lowercase__ : Union[str, Any] = value elif weight_type == "weight_g": lowercase__ : str = value elif weight_type == "weight_v": lowercase__ : str = value elif weight_type == "bias": lowercase__ : Optional[int] = value else: lowercase__ : Any = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : List[Any] = [] lowercase__ : Union[str, Any] = fairseq_model.state_dict() lowercase__ : Any = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == """group""" , ) lowercase__ : str = True else: for key, mapped_key in MAPPING.items(): lowercase__ : Optional[int] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase__ : Tuple = True if "*" in mapped_key: lowercase__ : Optional[Any] = name.split(lowercase_ )[0].split(""".""" )[-2] lowercase__ : Optional[Any] = mapped_key.replace("""*""" , lowercase_ ) if "weight_g" in name: lowercase__ : Optional[int] = """weight_g""" elif "weight_v" in name: lowercase__ : Dict = """weight_v""" elif "weight" in name: lowercase__ : List[str] = """weight""" elif "bias" in name: lowercase__ : str = """bias""" else: lowercase__ : List[Any] = None set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) continue if not is_used: unused_weights.append(lowercase_ ) logger.warning(F'Unused weights: {unused_weights}' ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = full_name.split("""conv_layers.""" )[-1] lowercase__ : int = name.split(""".""" ) lowercase__ : Union[str, Any] = int(items[0] ) lowercase__ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) lowercase__ : str = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) lowercase__ : Tuple = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) lowercase__ : Union[str, Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) lowercase__ : Optional[Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : Optional[Any] = SEWConfig() if is_finetuned: lowercase__ : Any = model.wav_encoder.wav_model.cfg else: lowercase__ : Dict = model.cfg lowercase__ : Optional[Any] = fs_config.conv_bias lowercase__ : Tuple = eval(fs_config.conv_feature_layers ) lowercase__ : List[str] = [x[0] for x in conv_layers] lowercase__ : Dict = [x[1] for x in conv_layers] lowercase__ : Tuple = [x[2] for x in conv_layers] lowercase__ : List[str] = """gelu""" lowercase__ : Union[str, Any] = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" lowercase__ : Union[str, Any] = 0.0 lowercase__ : Tuple = fs_config.activation_fn.name lowercase__ : Tuple = fs_config.encoder_embed_dim lowercase__ : List[str] = 0.02 lowercase__ : Optional[Any] = fs_config.encoder_ffn_embed_dim lowercase__ : Optional[Any] = 1E-5 lowercase__ : List[Any] = fs_config.encoder_layerdrop lowercase__ : Any = fs_config.encoder_attention_heads lowercase__ : Any = fs_config.conv_pos_groups lowercase__ : Dict = fs_config.conv_pos lowercase__ : List[Any] = len(lowercase_ ) lowercase__ : Union[str, Any] = fs_config.encoder_layers lowercase__ : Optional[Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowercase__ : Optional[Any] = model.cfg lowercase__ : Union[str, Any] = fs_config.final_dropout lowercase__ : int = fs_config.layerdrop lowercase__ : int = fs_config.activation_dropout lowercase__ : Optional[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowercase__ : Tuple = fs_config.attention_dropout lowercase__ : Any = fs_config.dropout_input lowercase__ : Any = fs_config.dropout lowercase__ : Dict = fs_config.mask_channel_length lowercase__ : Optional[int] = fs_config.mask_channel_prob lowercase__ : Any = fs_config.mask_length lowercase__ : Dict = fs_config.mask_prob lowercase__ : Dict = """Wav2Vec2FeatureExtractor""" lowercase__ : List[Any] = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True ) -> int: '''simple docstring''' if is_finetuned: lowercase__ , lowercase__ , lowercase__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: lowercase__ , lowercase__ , lowercase__ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowercase__ : Optional[Any] = SEWConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = convert_config(model[0] , lowercase_ ) lowercase__ : Dict = model[0].eval() lowercase__ : Any = True if config.feat_extract_norm == """layer""" else False lowercase__ : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , ) if is_finetuned: if dict_path: lowercase__ : Optional[int] = Dictionary.load(lowercase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase__ : List[Any] = target_dict.pad_index lowercase__ : int = target_dict.bos_index lowercase__ : Optional[Any] = target_dict.pad_index lowercase__ : List[str] = target_dict.bos_index lowercase__ : Optional[int] = target_dict.eos_index lowercase__ : Union[str, Any] = 
len(target_dict.symbols ) lowercase__ : Optional[int] = os.path.join(lowercase_ , """vocab.json""" ) if not os.path.isdir(lowercase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase_ ) ) return os.makedirs(lowercase_ , exist_ok=lowercase_ ) with open(lowercase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , lowercase_ ) lowercase__ : Optional[Any] = WavaVecaCTCTokenizer( lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase_ , ) lowercase__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ ) processor.save_pretrained(lowercase_ ) lowercase__ : List[str] = SEWForCTC(lowercase_ ) else: lowercase__ : str = SEWModel(lowercase_ ) feature_extractor.save_pretrained(lowercase_ ) recursively_load_weights(lowercase_ , lowercase_ , lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Any = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) lowerCamelCase__ : Optional[int] = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
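A hypothetical invocation of the conversion entry point above (the script filename and all paths are placeholders):

# python convert_sew_original_checkpoint.py \
#     --checkpoint_path ./sew_checkpoint.pt \
#     --pytorch_dump_folder_path ./sew-hf \
#     --dict_path ./dict.ltr.txt \
#     --is_finetuned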
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : Tuple = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[int] = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
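# Sketch of consuming the public surface this module lazily exposes; the
# checkpoint name is an example:
#
#   from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#
#   tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")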
12
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( UpperCAmelCase_ ): def __init__( self): '''simple docstring''' lowercase__ : List[Any] = [] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_init_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_evaluate""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_predict""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_save""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_log""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_prediction_step""") @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.output_dir) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_) lowercase__ : Any = 
RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_) lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_) return Trainer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_)) # Order doesn't matter lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__) elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_) else: self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = ["""on_init_end""", """on_train_begin"""] lowercase__ : Union[str, Any] = 0 lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader()) lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(SCREAMING_SNAKE_CASE_): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_trainer() lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # Callbacks passed at init are added to the default callbacks lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ 
( self): '''simple docstring''' lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # We can also add, pop, or remove by instance lowercase__ : Union[str, Any] = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # Independent log/save/eval lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""") trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) 
trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowercase__ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
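# Sketch of the pattern these tests exercise: a user-defined TrainerCallback
# hooked into a Trainer. The Trainer construction below is indicative only.
class LogOnSaveCallback(TrainerCallback):
    def on_save(self, args, state, control, **kwargs):
        # Fires every time a checkpoint is written during training.
        print(f"checkpoint saved at step {state.global_step}")


# trainer = Trainer(model=model, args=training_args, callbacks=[LogOnSaveCallback()])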
12
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
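# Sketch of consuming these exports; the checkpoint name is an example drawn
# from the model documentation, not something defined in this file:
#
#   from transformers import AutoTokenizer, RobertaPreLayerNormModel
#
#   tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
#   model = RobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")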
12
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = RoCBertTokenizer __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = filter_non_english def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowercase__ : Dict = {} lowercase__ : Tuple = {} for i, value in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = i lowercase__ : Any = i lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""]) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""") , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowercase__ : Optional[int] = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = i lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) if self.test_rust_tokenizer: lowercase__ : int = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
lowercase__ : List[str] = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False lowercase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ["""的""", """人""", """有"""] lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Union[str, Any] = True lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = False lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that only the first Chinese character is not preceded by "##". 
lowercase__ : Any = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): lowercase__ : Optional[int] = """你好,你是谁""" lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.prepare_for_model( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
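# Sketch of end-user usage of the tokenizer under test; the checkpoint name is
# an example, and the extra id streams shown are the ones the RoCBert model
# consumes alongside input_ids:
#
#   tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
#   encoded = tokenizer("你好,世界")
#   print(encoded["input_ids"], encoded["input_shape_ids"], encoded["input_pronunciation_ids"])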
12
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[str] = """huggingface/label-files""" lowercase__ : Optional[int] = """imagenet-1k-id2label.json""" lowercase__ : int = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) ) lowercase__ : int = {int(lowercase_ ): v for k, v in idalabel.items()} lowercase__ : List[str] = {v: k for k, v in idalabel.items()} lowercase__ : Tuple = """std_conv""" if """bit""" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase__ : int = BitConfig( conv_layer=lowercase_ , num_labels=10_00 , idalabel=lowercase_ , labelaid=lowercase_ , ) return config def UpperCamelCase ( lowercase_ ) -> Tuple: '''simple docstring''' if "stem.conv" in name: lowercase__ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowercase__ : Tuple = name.replace("""blocks""" , """layers""" ) if "head.fc" in name: lowercase__ : List[str] = name.replace("""head.fc""" , """classifier.1""" ) if name.startswith("""norm""" ): lowercase__ : List[str] = """bit.""" + name if "bit" not in name and "classifier" not in name: lowercase__ : Optional[int] = """bit.encoder.""" + name return name def UpperCamelCase ( ) -> Dict: '''simple docstring''' lowercase__ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase__ : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=False ) -> Union[str, Any]: '''simple docstring''' lowercase__ : Tuple = get_config(lowercase_ ) # load original model from timm lowercase__ : str = create_model(lowercase_ , pretrained=lowercase_ ) timm_model.eval() # load state_dict of original model lowercase__ : Any = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase__ : Optional[Any] = state_dict.pop(lowercase_ ) lowercase__ : Tuple = val.squeeze() if """head""" in key else val # load HuggingFace model lowercase__ : List[str] = BitForImageClassification(lowercase_ ) model.eval() model.load_state_dict(lowercase_ ) # create image processor lowercase__ : Tuple = create_transform(**resolve_data_config({} , model=lowercase_ ) ) lowercase__ : Any = transform.transforms lowercase__ : Dict = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } lowercase__ : Any = BitImageProcessor( do_resize=lowercase_ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase_ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase_ , 
image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__ : List[Any] = prepare_img() lowercase__ : Any = transform(lowercase_ ).unsqueeze(0 ) lowercase__ : str = processor(lowercase_ , return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase_ , lowercase_ ) # verify logits with torch.no_grad(): lowercase__ : Any = model(lowercase_ ) lowercase__ : Dict = outputs.logits print("""Logits:""" , logits[0, :3] ) print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase__ : List[str] = timm_model(lowercase_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase_ , outputs.logits , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) if push_to_hub: print(F'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(F'ybelkada/{model_name}' ) processor.push_to_hub(F'ybelkada/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) lowerCamelCase__ : Dict = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
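# Example invocation (paths are placeholders; the flags are the ones declared above):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub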
12
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = 
outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
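# Sketch of driving this pipeline through the high-level factory; the
# checkpoint name is an example zero-shot detection model:
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   for prediction in detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   ):
#       print(prediction["label"], round(prediction["score"], 3), prediction["box"])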
12
1
from random import shuffle

import tensorflow as tf
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    """K-means clustering built on the TensorFlow 1.x graph API
    (tf.placeholder / tf.Session); requires TF 1.x or tf.compat.v1."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
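if __name__ == "__main__":
    # Minimal usage sketch on toy 2-D points; assumes a TensorFlow 1.x-style
    # runtime (e.g. `import tensorflow.compat.v1 as tf` with eager execution
    # disabled).
    points = [
        array([1.0, 1.0]),
        array([1.5, 2.0]),
        array([8.0, 8.0]),
        array([8.5, 9.0]),
    ]
    centroids, assignments = TFKMeansCluster(points, 2)
    print("centroids:", centroids)
    print("assignments:", assignments)  # e.g. [0, 0, 1, 1], up to label order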
12
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack using the global dp table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solves the knapsack and reconstructs one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walks the dp table backwards, collecting the 1-based index of each taken item."""
    # An item i is part of the optimal subset exactly when taking it changed
    # the optimal value, i.e. dp[i][j] != dp[i - 1][j].
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
12
1
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Graph of transition probabilities backing a Markov chain simulation."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Samples the next node proportionally to the outgoing probabilities."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Runs a random walk of `steps` transitions and counts node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
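if __name__ == "__main__":
    # Usage sketch with a hypothetical two-state transition table; state "a"
    # should dominate the visit counts for these probabilities.
    two_state = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    print(get_transitions("a", two_state, 1000).most_common())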
12
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
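# Example invocation (script name and paths are placeholders; the flags are the
# ones declared above):
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava.pt \
#       --codebook_path ./codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf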
12
1
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = UnCLIPImageVariationPipeline __lowerCAmelCase : List[Any] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} __lowerCAmelCase : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS __lowerCAmelCase : Tuple = [ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] __lowerCAmelCase : int = False @property def lowercase__ ( self): '''simple docstring''' return 32 @property def lowercase__ ( self): '''simple docstring''' return 32 @property def lowercase__ ( self): '''simple docstring''' return self.time_input_dim @property def lowercase__ ( self): '''simple docstring''' return self.time_input_dim * 4 @property def lowercase__ ( self): '''simple docstring''' return 1_00 @property def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") return tokenizer @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : List[Any] = { """clip_embeddings_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """cross_attention_dim""": self.cross_attention_dim, } lowercase__ : List[str] = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE_) return model @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : List[str] = { """sample_size""": 32, # RGB in channels """in_channels""": 3, # Out channels is double in channels because predicts mean and variance """out_channels""": 6, """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), 
"""layers_per_block""": 1, """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": """identity""", } lowercase__ : Optional[int] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_) return model @property def lowercase__ ( self): '''simple docstring''' return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs) return model @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(1) lowercase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs) return model def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.dummy_decoder lowercase__ : Any = self.dummy_text_proj lowercase__ : Optional[int] = self.dummy_text_encoder lowercase__ : Optional[int] = self.dummy_tokenizer lowercase__ : Tuple = self.dummy_super_res_first lowercase__ : List[Any] = self.dummy_super_res_last lowercase__ : Optional[Any] = UnCLIPScheduler( variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , ) lowercase__ : Dict = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , ) lowercase__ : Tuple = CLIPImageProcessor(crop_size=32 , size=32) lowercase__ : List[str] = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=True): '''simple docstring''' lowercase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_)).to(SCREAMING_SNAKE_CASE_) if str(SCREAMING_SNAKE_CASE_).startswith("""mps"""): lowercase__ : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_) else: lowercase__ : int = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(SCREAMING_SNAKE_CASE_) if pil_image: lowercase__ : Any = input_image * 0.5 + 0.5 lowercase__ : Any = input_image.clamp(0 , 1) lowercase__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy() lowercase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE_)[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = """cpu""" lowercase__ : List[str] = self.get_dummy_components() lowercase__ : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = output.images lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ 
, pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = pipe( **SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] lowercase__ : Any = image[0, -3:, -3:, -1] lowercase__ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ : str = np.array( [ 0.9_9_9_7, 0.0_0_0_2, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_6_9, 0.0_0_2_3, 0.9_9_9_7, 0.9_9_6_9, 0.9_9_7_0, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = """cpu""" lowercase__ : Optional[int] = self.get_dummy_components() lowercase__ : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = pipe(**SCREAMING_SNAKE_CASE_) lowercase__ : int = output.images lowercase__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : int = pipe( **SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] lowercase__ : Optional[Any] = image[0, -3:, -3:, -1] lowercase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = """cpu""" lowercase__ : Union[str, Any] = self.get_dummy_components() lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = [ pipeline_inputs["""image"""], pipeline_inputs["""image"""], ] lowercase__ : List[str] = pipe(**SCREAMING_SNAKE_CASE_) lowercase__ : str = output.images lowercase__ : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : str = [ tuple_pipeline_inputs["""image"""], tuple_pipeline_inputs["""image"""], ] lowercase__ : Optional[int] = pipe( **SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] lowercase__ : Optional[Any] = image[0, -3:, -3:, -1] lowercase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) lowercase__ : List[str] = np.array( [ 0.9_9_9_7, 0.9_9_8_9, 0.0_0_0_8, 0.0_0_2_1, 0.9_9_6_0, 0.0_0_1_8, 0.0_0_1_4, 0.0_0_0_2, 0.9_9_3_3, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = torch.device("""cpu""") class _snake_case : __lowerCAmelCase : Optional[Any] = 1 lowercase__ : int = self.get_dummy_components() lowercase__ : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(0) lowercase__ : 
List[str] = pipe.decoder.dtype lowercase__ : int = 1 lowercase__ : Dict = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) lowercase__ : Tuple = pipe.prepare_latents( SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler()) lowercase__ : Tuple = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) lowercase__ : str = pipe.prepare_latents( SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler()) lowercase__ : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = pipe( **SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_).images lowercase__ : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_) # Don't pass image, instead pass embedding lowercase__ : List[str] = pipeline_inputs.pop("""image""") lowercase__ : Optional[int] = pipe.image_encoder(SCREAMING_SNAKE_CASE_).image_embeds lowercase__ : Union[str, Any] = pipe( **SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ , image_embeddings=SCREAMING_SNAKE_CASE_ , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a).max() < 1E-4 @skip_mps def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = torch_device == """cpu""" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor lowercase__ : str = 1E-2 self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=SCREAMING_SNAKE_CASE_) @skip_mps def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch_device == """cpu""" lowercase__ : Optional[int] = True lowercase__ : Tuple = [ """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [ """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes lowercase__ : Optional[int] = [2, 3] self._test_inference_batch_consistent( batch_sizes=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_) @skip_mps def lowercase__ ( self): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowercase__ ( self): '''simple docstring''' return super().test_save_load_local() @skip_mps def lowercase__ ( self): '''simple docstring''' return super().test_save_load_optional_components() @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = load_image( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""") lowercase__ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/unclip/karlo_v1_alpha_cat_variation_fp16.npy""") lowercase__ : str = UnCLIPImageVariationPipeline.from_pretrained( """kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa) lowercase__ : Optional[int] = pipeline.to(SCREAMING_SNAKE_CASE_) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = torch.Generator(device="""cpu""").manual_seed(0) lowercase__ : int = pipeline( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , ) lowercase__ : str = output.images[0] assert image.shape == (2_56, 2_56, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 15)
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
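# Short usage sketch of the processor exercised by the tests above; the random
# uint8 array stands in for a real image.
import numpy as np

from transformers import ViTImageProcessor

image_processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # fake H x W x C image
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])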
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the given price with tax added."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we have been absent twice, or late three days in a row,
    # no prize strings are possible from this state
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
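# A brute-force cross-check (not part of the original file; added as a hedged
# sanity test). Under the conventions above, a prize string has at most one
# absence in total and never three consecutive late days.
from itertools import product


def _brute_force_count(days: int) -> int:
    total = 0
    for letters in product("OAL", repeat=days):
        attendance = "".join(letters)
        if attendance.count("A") < 2 and "LLL" not in attendance:
            total += 1
    return total


if __name__ == "__main__":
    for n in range(1, 8):
        assert _brute_force_count(n) == _calculate(n, absent=0, late=0)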
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
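# Usage sketch (not part of the module above): the lazy module lets consumers
# import these classes straight from `transformers`. The checkpoint name is an
# assumption for illustration.
import requests
from PIL import Image

from transformers import ChineseCLIPModel, ChineseCLIPProcessor

checkpoint = "OFA-Sys/chinese-clip-vit-base-patch16"  # assumed public checkpoint
model = ChineseCLIPModel.from_pretrained(checkpoint)
processor = ChineseCLIPProcessor.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["一只猫", "一只狗"], images=image, return_tensors="pt", padding=True)  # "a cat", "a dog"
probs = model(**inputs).logits_per_image.softmax(dim=1)  # image-text similarity scores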
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class _snake_case ( nn.Module ): def __init__( self): '''simple docstring''' super().__init__() lowercase__ : Optional[Any] = nn.Linear(3 , 4) lowercase__ : Union[str, Any] = nn.BatchNormad(4) lowercase__ : str = nn.Linear(4 , 5) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_))) class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowercase__ , lowercase__ : int = mock_training_loop_function("""hello""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, """hello"""]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function(1_28 , """hello""" , """world""") self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0]) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): raise ValueError("""Oops, we had an error!""") with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0]) @require_cuda def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch.cuda.memory_allocated() lowercase__ : str = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = release_memory(SCREAMING_SNAKE_CASE_) 
self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_)
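# A minimal sketch of how `find_executable_batch_size` is used outside of
# tests (model, dataset, and `make_dataloader` are hypothetical placeholders):
# the decorator injects `batch_size` and halves it after every CUDA OOM.
from accelerate.utils.memory import find_executable_batch_size


def train_model(model, dataset):
    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        for batch in make_dataloader(dataset, batch_size):  # hypothetical helper
            loss = model(**batch).loss
            loss.backward()

    inner_training_loop()  # called with no args; the decorator supplies batch_size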
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCamelCase__ : int = { """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""", """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""", """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""", """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""", """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""", """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""", """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""", """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""", """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""", """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""", } def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : int = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(lowercase_ , lowercase_ ) lowerCamelCase__ : Optional[Any] = { """blocks""": """layers""", """mlp.0""": """fc1""", """mlp.2""": """fc2""", """mlp_ln""": """final_layer_norm""", """.attn.query""": """.self_attn.q_proj""", """.attn.key""": """.self_attn.k_proj""", """.attn.value""": """.self_attn.v_proj""", """.attn_ln""": """.self_attn_layer_norm""", """.attn.out""": """.self_attn.out_proj""", """.cross_attn.query""": """.encoder_attn.q_proj""", """.cross_attn.key""": """.encoder_attn.k_proj""", """.cross_attn.value""": """.encoder_attn.v_proj""", """.cross_attn_ln""": """.encoder_attn_layer_norm""", """.cross_attn.out""": """.encoder_attn.out_proj""", """decoder.ln.""": """decoder.layer_norm.""", """encoder.ln.""": """encoder.layer_norm.""", """token_embedding""": """embed_tokens""", """encoder.positional_embedding""": """encoder.embed_positions.weight""", """decoder.positional_embedding""": """decoder.embed_positions.weight""", """ln_post""": """layer_norm""", } def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' lowercase__ : str = list(s_dict.keys() ) for key in keys: lowercase__ : int = key for k, v in WHISPER_MAPPING.items(): if k in key: lowercase__ : Union[str, Any] = new_key.replace(lowercase_ , lowercase_ ) print(F'{key} -> {new_key}' ) lowercase__ : List[str] = s_dict.pop(lowercase_ ) return s_dict def UpperCamelCase ( lowercase_ ) -> List[str]: '''simple docstring''' lowercase__ , lowercase__ : int = emb.weight.shape lowercase__ : List[Any] = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ ) lowercase__ : Any = emb.weight.data return lin_layer def UpperCamelCase ( lowercase_ , lowercase_ ) -> 
bytes: '''simple docstring''' os.makedirs(lowercase_ , exist_ok=lowercase_ ) lowercase__ : List[str] = os.path.basename(lowercase_ ) lowercase__ : Union[str, Any] = url.split("""/""" )[-2] lowercase__ : int = os.path.join(lowercase_ , lowercase_ ) if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ): raise RuntimeError(F'{download_target} exists and is not a regular file' ) if os.path.isfile(lowercase_ ): lowercase__ : Tuple = open(lowercase_ , """rb""" ).read() if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' ) with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=10_24 ) as loop: while True: lowercase__ : str = source.read(81_92 ) if not buffer: break output.write(lowercase_ ) loop.update(len(lowercase_ ) ) lowercase__ : Tuple = open(lowercase_ , """rb""" ).read() if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" ) return model_bytes def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' if ".pt" not in checkpoint_path: lowercase__ : Dict = _download(_MODELS[checkpoint_path] ) else: lowercase__ : str = torch.load(lowercase_ , map_location="""cpu""" ) lowercase__ : List[Any] = original_checkpoint["""dims"""] lowercase__ : Optional[Any] = original_checkpoint["""model_state_dict"""] lowercase__ : str = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(lowercase_ ) rename_keys(lowercase_ ) lowercase__ : Any = True lowercase__ : Any = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] lowercase__ : int = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) lowercase__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ ) lowercase__ , lowercase__ : int = model.model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0 and not set(lowercase_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" F' but all the following weights are missing {missing}' ) if tie_embeds: lowercase__ : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowercase__ : Tuple = proj_out_weights model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") lowerCamelCase__ : Any = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path,
args.pytorch_dump_folder_path)
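# Standalone sketch of the checksum idea used by `_download` above (the chunked
# read is my own restatement, not the original loop): verify a cached file's
# SHA256 digest before trusting it.
import hashlib


def sha256_matches(path: str, expected_sha256: str) -> bool:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256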
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = 
name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 
10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
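# Sketch of the fused-QKV split performed in `convert_state_dict` above: the
# original checkpoint stores query/key/value as one (3*dim, dim) matrix that is
# sliced into three (dim, dim) blocks. `dim = 8` is an arbitrary example size.
import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)  # fused projection from the checkpoint
query = qkv_weight[:dim, :]             # first third  -> query
key = qkv_weight[dim : dim * 2, :]      # middle third -> key
value = qkv_weight[-dim:, :]            # last third   -> value
assert torch.equal(torch.cat([query, key, value]), qkv_weight)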
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": 
jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir 
is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... 
""") self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : str = { """facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""", """facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""", """facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""", """facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""", """facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""", """facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""", """facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""", """facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""", """facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""", } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = 'xmod' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=("en_XX",) , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Dict = vocab_size lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : List[Any] = num_attention_heads lowercase__ : Union[str, Any] = hidden_act lowercase__ : Any = intermediate_size lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : Dict = attention_probs_dropout_prob lowercase__ : Union[str, Any] = max_position_embeddings lowercase__ : Union[str, Any] = type_vocab_size lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[Any] = layer_norm_eps lowercase__ : Optional[int] = position_embedding_type lowercase__ : List[str] = use_cache lowercase__ : Tuple = classifier_dropout lowercase__ : Any = pre_norm lowercase__ : Any = adapter_reduction_factor lowercase__ : str = adapter_layer_norm lowercase__ : str = adapter_reuse_layer_norm lowercase__ : Any = ln_before_adapter lowercase__ : List[Any] = list(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = default_language class _snake_case ( UpperCAmelCase_ ): @property def lowercase__ ( self): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ : int = {0: """batch""", 1: 
"""sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
12
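The obfuscated definition above mirrors the public `XmodConfig`; a hedged usage sketch, assuming a transformers release that ships X-MOD (v4.26+):

from transformers import XmodConfig

# Per-language adapters: one adapter per entry in `languages`, with
# `default_language` used when the caller does not set a language explicitly.
config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX", adapter_reduction_factor=2)
print(config.languages, config.default_language)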
lowerCamelCase__ : List[str] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase__ : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
12
1
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> list[float]: '''simple docstring''' lowercase__ , lowercase__ : Optional[Any] = coefficient_matrix.shape lowercase__ , lowercase__ : str = constant_matrix.shape if rowsa != colsa: lowercase__ : Optional[int] = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}' raise ValueError(lowercase_ ) if colsa != 1: lowercase__ : List[str] = F'Constant matrix must be nx1 but received {rowsa}x{colsa}' raise ValueError(lowercase_ ) if rowsa != rowsa: lowercase__ : List[str] = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ F'received {rowsa}x{colsa} and {rowsa}x{colsa}' ) raise ValueError(lowercase_ ) if len(lowercase_ ) != rowsa: lowercase__ : Dict = ( """Number of initial values must be equal to number of rows in coefficient """ F'matrix but received {len(lowercase_ )} and {rowsa}' ) raise ValueError(lowercase_ ) if iterations <= 0: raise ValueError("""Iterations must be at least 1""" ) lowercase__ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowercase__ , lowercase__ : Dict = table.shape strictly_diagonally_dominant(lowercase_ ) # Iterates the whole matrix for given number of times for _ in range(lowercase_ ): lowercase__ : List[Any] = [] for row in range(lowercase_ ): lowercase__ : Optional[int] = 0 for col in range(lowercase_ ): if col == row: lowercase__ : Optional[int] = table[row][col] elif col == cols - 1: lowercase__ : Optional[Any] = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowercase__ : Dict = (temp + val) / denom new_val.append(lowercase_ ) lowercase__ : Any = new_val return [float(lowercase_ ) for i in new_val] def UpperCamelCase ( lowercase_ ) -> bool: '''simple docstring''' lowercase__ , lowercase__ : Any = table.shape lowercase__ : str = True for i in range(0 , lowercase_ ): lowercase__ : List[Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
12
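A compact NumPy rendering of the Jacobi iteration implemented above (the deobfuscated names are my assumption); the 3x3 system is strictly diagonally dominant, so the fixed-iteration loop converges.

import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([6.0, 8.0, 7.0])

x = np.zeros(3)             # initial guess
D = np.diag(A)              # diagonal entries
R = A - np.diagflat(D)      # off-diagonal part
for _ in range(50):         # fixed iteration count, as in the source
    x = (b - R @ x) / D     # x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii
print(x, A @ x)             # A @ x should be close to b (exact solution is [1, 1, 1])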
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], 
max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : 
Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning 
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
12
1
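A hedged sketch of the jit-compiled generation path the slow test above exercises; `GPT2Tokenizer` and the `EleutherAI/gpt-j-6B` checkpoint are the public names behind the obfuscated identifiers, and downloading the 6B model is only practical on a large machine.

import jax
from transformers import GPT2Tokenizer, FlaxGPTJForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")

inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
jit_generate = jax.jit(model.generate)  # compile once, reuse across calls
sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))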
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def UpperCamelCase ( ) -> None: '''simple docstring''' print("""Making key files...""" ) make_key_files("""rsa""" , 10_24 ) print("""Key files generation successful.""" ) def UpperCamelCase ( lowercase_ ) -> tuple[tuple[int, int], tuple[int, int]]: '''simple docstring''' print("""Generating prime p...""" ) lowercase__ : Dict = rabinMiller.generate_large_prime(lowercase_ ) print("""Generating prime q...""" ) lowercase__ : List[str] = rabinMiller.generate_large_prime(lowercase_ ) lowercase__ : int = p * q print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" ) while True: lowercase__ : List[Any] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(lowercase_ , (p - 1) * (q - 1) ) == 1: break print("""Calculating d that is mod inverse of e...""" ) lowercase__ : Dict = cryptoMath.find_mod_inverse(lowercase_ , (p - 1) * (q - 1) ) lowercase__ : Tuple = (n, e) lowercase__ : Union[str, Any] = (n, d) return (public_key, private_key) def UpperCamelCase ( lowercase_ , lowercase_ ) -> None: '''simple docstring''' if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ): print("""\nWARNING:""" ) print( F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' """Use a different name or delete these files and re-run this program.""" ) sys.exit() lowercase__ , lowercase__ : List[Any] = generate_key(lowercase_ ) print(F'\nWriting public key to file {name}_pubkey.txt...' ) with open(F'{name}_pubkey.txt' , """w""" ) as out_file: out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' ) print(F'Writing private key to file {name}_privkey.txt...' ) with open(F'{name}_privkey.txt' , """w""" ) as out_file: out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' ) if __name__ == "__main__": main()
12
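A toy round trip with the `(n, e)` / `(n, d)` key pairs the generator above writes out; the primes here are deliberately tiny and insecure, purely to show how encryption and decryption use the two halves.

p, q = 61, 53
n = p * q                                # 3233
e = 17                                   # coprime with (p - 1) * (q - 1) = 3120
d = pow(e, -1, (p - 1) * (q - 1))        # modular inverse (Python 3.8+)

message = 42
ciphertext = pow(message, e, n)          # encrypt with the public key (n, e)
assert pow(ciphertext, d, n) == message  # decrypt with the private key (n, d)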
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['image_processor', 'tokenizer'] __lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor' __lowerCAmelCase : int = 'AutoTokenizer' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if images is not None: lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if text is not None and images is not None: lowercase__ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_) , tensor_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
12
1
def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' lowercase__ : Union[str, Any] = 0 for i in range(1 , 10_01 ): total += i**i return str(lowercase_ )[-10:] if __name__ == "__main__": print(solution())
12
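Because only the last ten digits of the self-power series matter, the same answer comes out of modular exponentiation without ever materializing the ~3000-digit intermediate sum; a sketch:

MOD = 10**10  # keep only the last ten digits
total = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(total).zfill(10))  # agrees with the brute-force string slice above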
def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' if n == 1 or not isinstance(lowercase_ , lowercase_ ): return 0 elif n == 2: return 1 else: lowercase__ : List[Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' lowercase__ : Optional[Any] = 0 lowercase__ : Dict = 2 while digits < n: index += 1 lowercase__ : str = len(str(fibonacci(lowercase_ ) ) ) return index def UpperCamelCase ( lowercase_ = 10_00 ) -> int: '''simple docstring''' return fibonacci_digits_index(lowercase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
12
1
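The search above recomputes the Fibonacci sequence from scratch at every index; a single-pass variant that carries the pair forward reaches the same index in linear time (a sketch, with assumed descriptive names):

def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1), F(2)
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term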
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor lowerCamelCase__ : str = logging.get_logger(__name__) class _snake_case ( UpperCAmelCase_ ): def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' warnings.warn( """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use YolosImageProcessor instead.""" , SCREAMING_SNAKE_CASE_ , ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
12
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set.""" def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any: '''simple docstring''' lowercase__ : Any = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowercase__ : int = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowercase__ : Dict = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any = torch.cuda.device_count() lowercase__ : Any = num_gpus lowercase__ : Optional[int] = False if num_gpus > 1: lowercase__ : Tuple = """MULTI_GPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_xpu_available() and use_xpu: lowercase__ : Union[str, Any] = torch.xpu.device_count() lowercase__ : str = num_xpus lowercase__ : List[Any] = False if num_xpus > 1: lowercase__ : str = """MULTI_XPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_npu_available(): lowercase__ : Tuple = torch.npu.device_count() lowercase__ : Union[str, Any] = num_npus lowercase__ : Union[str, Any] = False if num_npus > 1: lowercase__ : List[Any] = """MULTI_NPU""" else: lowercase__ : int = """NO""" else: lowercase__ : Union[str, Any] = 0 lowercase__ : str = True lowercase__ : Union[str, Any] = 1 lowercase__ : int = """NO""" lowercase__ : Tuple = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ ) parser.add_argument( """--config_file""" , default=lowercase_ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=lowercase_ ) return parser def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
12
1
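A hypothetical invocation of the helper above through the public `accelerate.utils` entry point; the save path is illustrative.

from accelerate.utils import write_basic_config

# Writes a single-machine default config; returns the path on success,
# or False if a config already exists at that location.
path = write_basic_config(mixed_precision="no", save_location="/tmp/accelerate_default_config.yaml")
print(path)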
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Tuple = CodeGenTokenizer __lowerCAmelCase : Optional[int] = CodeGenTokenizerFast __lowerCAmelCase : Any = True __lowerCAmelCase : Tuple = {'add_prefix_space': True} __lowerCAmelCase : str = False def lowercase__ ( self): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] lowercase__ : int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_)))) lowercase__ : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase__ : Dict = {"""unk_token""": """<unk>"""} lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_)) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' kwargs.update(self.special_tokens_map) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' kwargs.update(self.special_tokens_map) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = """lower newer""" lowercase__ : Optional[Any] = """lower newer""" return input_text, output_text def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) lowercase__ : List[Any] = """lower newer""" lowercase__ : str = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowercase__ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokens + [tokenizer.unk_token] lowercase__ : int = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase__ : str = self.get_tokenizer() lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = """lower newer""" # Testing tokenization lowercase__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # Testing 
conversion to ids without special tokens lowercase__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # Testing conversion to ids with special tokens lowercase__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # Testing the unknown token lowercase__ : Optional[int] = tokens + [rust_tokenizer.unk_token] lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' pass def lowercase__ ( self , SCREAMING_SNAKE_CASE_=15): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) # Simple input lowercase__ : Optional[int] = """This is a simple input""" lowercase__ : Any = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase__ : Optional[Any] = ("""This is a simple input""", """This is a pair""") lowercase__ : int = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Simple input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Simple input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Pair input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""") # Simple input lowercase__ : Dict = """This is a simple input""" lowercase__ : Optional[int] = ["""This is a simple input looooooooong""", """This is a simple input"""] lowercase__ : List[str] = ("""This is a simple input""", """This is a pair""") lowercase__ : Any = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] lowercase__ : Union[str, Any] = tokenizer.pad_token_id lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE_ , 
padding="""max_length""" , max_length=30 , return_tensors="""np""") lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors="""np""") lowercase__ : List[Any] = tokenizer(*SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=60 , return_tensors="""np""") lowercase__ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors="""np""") # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30) self.assertTrue(pad_token_id in out_s["""input_ids"""]) self.assertTrue(0 in out_s["""attention_mask"""]) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0]) self.assertFalse(0 in out_sa["""attention_mask"""][0]) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1]) self.assertTrue(0 in out_sa["""attention_mask"""][1]) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60) self.assertTrue(pad_token_id in out_p["""input_ids"""]) self.assertTrue(0 in out_p["""attention_mask"""]) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0]) self.assertFalse(0 in out_pa["""attention_mask"""][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1]) self.assertTrue(0 in out_pa["""attention_mask"""][1]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = """$$$""" lowercase__ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_) lowercase__ : Any = """This is a simple input""" lowercase__ : Dict = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase__ : Optional[Any] = tokenizer.bos_token_id lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE_) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE_) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids)) lowercase__ : Any = tokenizer.decode(out_s.input_ids) lowercase__ : int = tokenizer.batch_decode(out_sa.input_ids) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE_) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa)) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""") lowercase__ : Optional[Any] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" lowercase__ : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b""" lowercase__ : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>"""), """^'''""", """^\"\"\"""", """\n\n\n"""] lowercase__ : int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , truncate_before_pattern=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' pass
12
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Union[str, Any] = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = 'convbert' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : Dict = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Tuple = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Dict = layer_norm_eps lowercase__ : Tuple = embedding_size lowercase__ : List[str] = head_ratio lowercase__ : Dict = conv_kernel_size lowercase__ : Dict = num_groups lowercase__ : int = classifier_dropout class _snake_case ( UpperCAmelCase_ ): @property def lowercase__ ( self): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ])
12
1
def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' return abs(lowercase_ ) if a == 0 else greatest_common_divisor(b % a , lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' while y: # --> when y=0 then loop will terminate and return x as final GCD. lowercase__ , lowercase__ : Tuple = y, x % y return abs(lowercase_ ) def UpperCamelCase ( ) -> Optional[Any]: '''simple docstring''' try: lowercase__ : Dict = input("""Enter two integers separated by comma (,): """ ).split(""",""" ) lowercase__ : str = int(nums[0] ) lowercase__ : Dict = int(nums[1] ) print( F'greatest_common_divisor({num_a}, {num_a}) = ' F'{greatest_common_divisor(lowercase_ , lowercase_ )}' ) print(F'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowercase_ , lowercase_ )}' ) except (IndexError, UnboundLocalError, ValueError): print("""Wrong input""" ) if __name__ == "__main__": main()
12
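A self-contained check of the recursion above against `math.gcd`; the name `gcd_recursive` and the restated body are mine. The source's recursion bottoms out at `a == 0` and takes `abs` so that negative inputs still yield a positive divisor.

import math

def gcd_recursive(a: int, b: int) -> int:
    return abs(b) if a == 0 else gcd_recursive(b % a, a)

for a, b in [(120, 84), (0, 7), (-12, 18)]:
    assert gcd_recursive(a, b) == math.gcd(a, b)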
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__) class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ): __lowerCAmelCase : bool = None __lowerCAmelCase : bool = None class _snake_case ( folder_based_builder.FolderBasedBuilder ): __lowerCAmelCase : Optional[Any] = datasets.Audio() __lowerCAmelCase : Union[str, Any] = 'audio' __lowerCAmelCase : str = AudioFolderConfig __lowerCAmelCase : List[str] # definition at the bottom of the script __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' ) lowerCamelCase__ : int = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] lowerCamelCase__ : int = AUDIO_EXTENSIONS
12
1
from math import sqrt def UpperCamelCase ( lowercase_ ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(lowercase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCamelCase ( lowercase_ = 1_00_01 ) -> int: '''simple docstring''' lowercase__ : int = 0 lowercase__ : Dict = 1 while count != nth and number < 3: number += 1 if is_prime(lowercase_ ): count += 1 while count != nth: number += 2 if is_prime(lowercase_ ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
12
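A cross-check against sympy (an assumption: sympy must be installed); the 10001st prime, which `solution()` finds by 6k±1 trial division, is 104743.

from sympy import prime

assert prime(10_001) == 104_743  # the value solution() returns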
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = (DDPMScheduler,) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = { """num_train_timesteps""": 10_00, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**SCREAMING_SNAKE_CASE_) return config def lowercase__ ( self): '''simple docstring''' for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : Union[str, Any] = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5 def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = len(SCREAMING_SNAKE_CASE_) lowercase__ : Any = self.dummy_model() lowercase__ : List[Any] = self.dummy_sample_deter lowercase__ : str = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. 
predict previous mean of sample x_t-1 lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : str = pred_prev_sample lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2 assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""") lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter lowercase__ : int = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. predict previous mean of sample x_t-1 lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : Tuple = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2 assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE_): if i == len(SCREAMING_SNAKE_CASE_) - 1: lowercase__ : Optional[int] = -1 else: lowercase__ : Tuple = timesteps[i + 1] lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_) lowercase__ : int = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [1_00, 87, 50, 51, 0] with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = [1_00, 87, 50, 1, 0] lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_) with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple 
docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : str = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
12
1
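A minimal sketch of the custom-timestep walk the last tests exercise, assuming a recent diffusers release where `set_timesteps` accepts an explicit `timesteps` list:

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be descending
print(scheduler.timesteps)                              # tensor([100, 87, 50, 1, 0])
print(scheduler.previous_timestep(scheduler.timesteps[0]).item())  # 87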
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : Tuple = s.rsplit(lowercase_ , lowercase_ ) return new.join(lowercase_ ) def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' lowercase__ : Tuple = {} lowercase__ : Tuple = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: lowercase__ : Tuple = key.replace(F'{group_key}.' , F'{group_key}.group.' ) if "res_path" in key: lowercase__ : Any = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): lowercase__ : Tuple = rreplace(lowercase_ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): lowercase__ : Dict = rreplace(lowercase_ , """.b""" , """.bias""" , 1 ) lowercase__ : Any = value.float() return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=True ) -> Union[str, Any]: '''simple docstring''' from dall_e import Encoder lowercase__ : List[str] = Encoder() if os.path.exists(lowercase_ ): lowercase__ : str = torch.load(lowercase_ ) else: lowercase__ : Any = torch.hub.load_state_dict_from_url(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): lowercase__ : Tuple = ckpt.state_dict() encoder.load_state_dict(lowercase_ ) if config_path is not None: lowercase__ : List[str] = FlavaImageCodebookConfig.from_pretrained(lowercase_ ) else: lowercase__ : int = FlavaImageCodebookConfig() lowercase__ : Tuple = FlavaImageCodebook(lowercase_ ).eval() lowercase__ : int = encoder.state_dict() lowercase__ : Tuple = upgrade_state_dict(lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Tuple = hf_model.state_dict() lowercase__ : Any = count_parameters(lowercase_ ) lowercase__ : Dict = count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(lowercase_ ) else: return hf_state_dict if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
12
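The right-side replace trick used by the converter above, restated standalone: split from the right a bounded number of times, then rejoin with the new text so only the trailing occurrence changes.

def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    parts = s.rsplit(old, occurrence)  # split at most `occurrence` times, from the right
    return new.join(parts)

assert rreplace("encoder.blocks.0.w", ".w", ".weight", 1) == "encoder.blocks.0.weight"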
def UpperCamelCase ( lowercase_ ) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("""List is empty""" ) lowercase__ : int = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowercase_ ) if __name__ == "__main__": import doctest doctest.testmod()
12
1
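An equivalent NumPy one-liner for the mean absolute deviation computed above, useful as a quick sanity check:

import numpy as np

nums = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mad = np.mean(np.abs(np.array(nums) - np.mean(nums)))
print(mad)  # same as sum(|x - mean|) / len(nums) in the pure-Python version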
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys lowerCamelCase__ : List[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") lowerCamelCase__ : Optional[int] = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() lowerCamelCase__ : Tuple = """|""".join(sys.argv[1:]) lowerCamelCase__ : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') lowerCamelCase__ : int = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
12
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = do_rescale lowercase__ : List[Any] = rescale_factor lowercase__ : Tuple = do_pad lowercase__ : Optional[Any] = pad_size def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height lowercase__ : str = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_) if not valid_images(SCREAMING_SNAKE_CASE_): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") # All transformations expect numpy arrays. lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images] if do_pad: lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Dict = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
12
1
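The pad-to-multiple arithmetic above, demonstrated on a toy array; note the formula always adds at least one row/column of padding, even when a dimension is already a multiple of `size`, mirroring the expression in the source.

import numpy as np

size = 8
image = np.zeros((13, 18))
pad_h = (image.shape[0] // size + 1) * size - image.shape[0]  # 3
pad_w = (image.shape[1] // size + 1) * size - image.shape[1]  # 6
padded = np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")
print(padded.shape)  # (16, 24): both dimensions now multiples of 8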
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
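# A minimal round-trip sketch of the reader/writer pair these tests exercise,
# via the public Dataset.to_sql / Dataset.from_sql convenience methods. The
# table name ("dataset") and the sqlite file path are illustrative only.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_sql("dataset", "sqlite:///example.db")
round_tripped = Dataset.from_sql("dataset", "sqlite:///example.db")
assert round_tripped.column_names == ["col_1", "col_2", "col_3"]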
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu lowerCamelCase__ : Optional[int] = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[Any] = True while ask_again: lowercase__ : Tuple = input(lowercase_ ) try: if default is not None and len(lowercase_ ) == 0: return default return convert_value(lowercase_ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ ) lowercase__ : Any = menu.run(default_choice=lowercase_ ) return convert_value(lowercase_ ) if convert_value is not None else result def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = int(lowercase_ ) return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[str] = int(lowercase_ ) return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] ) def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : str = int(lowercase_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _snake_case ( argparse.RawDescriptionHelpFormatter ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""") return usage
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
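# Non-interactive usage sketch for bellman_ford above; the edge list and the
# source vertex are arbitrary illustration data.
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
# Relaxing 0->1 (4) and then 1->2 (4 - 3 = 1) beats the direct 0->2 edge (5):
assert bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0) == [0.0, 4.0, 1.0]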
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one (e.g. "IV") means subtraction.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
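# Quick checks for the converters above, including a subtractive pair and a
# round trip.
assert roman_to_int("MMXXI") == 2021
assert roman_to_int("XIV") == 14
assert int_to_roman(3456) == "MMMCDLVI"
assert int_to_roman(roman_to_int("XLII")) == "XLII"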
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( UpperCAmelCase_ ): def __init__( self): '''simple docstring''' lowercase__ : List[Any] = [] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_init_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_evaluate""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_predict""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_save""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_log""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_prediction_step""") @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.output_dir) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_) lowercase__ : Any = 
RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_) lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_) return Trainer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_)) # Order doesn't matter lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__) elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_) else: self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = ["""on_init_end""", """on_train_begin"""] lowercase__ : Union[str, Any] = 0 lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader()) lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(SCREAMING_SNAKE_CASE_): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_trainer() lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # Callbacks passed at init are added to the default callbacks lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ 
( self): '''simple docstring''' lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # We can also add, pop, or remove by instance lowercase__ : Union[str, Any] = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # Independent log/save/eval lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""") trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) 
trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowercase__ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
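# Worked examples for digit_factorial_sum above: 145 is a fixed point
# (1! + 4! + 5! = 1 + 24 + 120 = 145), and 169 lies on the 3-cycle
# 169 -> 363601 -> 1454 -> 169 that Project Euler 74 is built around.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601
assert digit_factorial_sum(363601) == 1454
assert digit_factorial_sum(1454) == 169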
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = RoCBertTokenizer __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = filter_non_english def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowercase__ : Dict = {} lowercase__ : Tuple = {} for i, value in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = i lowercase__ : Any = i lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""]) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""") , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowercase__ : Optional[int] = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = i lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) if self.test_rust_tokenizer: lowercase__ : int = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
lowercase__ : List[str] = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False lowercase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ["""的""", """人""", """有"""] lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Union[str, Any] = True lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = False lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that only the first Chinese character is not preceded by "##". 
lowercase__ : Any = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): lowercase__ : Optional[int] = """你好,你是谁""" lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.prepare_for_model( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
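# A sketch of how a pin table like `deps` above is typically consumed, e.g. to
# build an install_requires/extras list in setup.py. The helper name and its
# use are an assumption modeled on common HuggingFace packaging code, not
# taken from the table itself.
def deps_list(*pkgs: str) -> list[str]:
    # Map bare package names to their pinned requirement strings.
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "transformers") == ["torch>=1.4", "transformers>=4.25.1"]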
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = 
outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase__ : str = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) else: lowercase__ : List[str] = max( mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , ) lowercase__ : List[Any] = val return f[i][j] def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Any = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase__ : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase__ : Tuple = dp[i - 1][w_] return dp[n][w_], dp def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) lowercase__ : str = len(lowercase_ ) if num_items != len(lowercase_ ): lowercase__ : Optional[int] = ( """The number of weights must be the same as the number of values.\n""" F'But got {num_items} weights and {len(lowercase_ )} values' ) raise ValueError(lowercase_ ) for i in range(lowercase_ ): if not isinstance(wt[i] , lowercase_ ): lowercase__ : int = ( """All weights must be integers but got weight of """ F'type {type(wt[i] )} at index {i}' ) raise TypeError(lowercase_ ) lowercase__ , lowercase__ : Tuple = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : set = set() _construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) return optimal_val, example_optional_set def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ ) else: optimal_set.add(lowercase_ ) _construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Dict = [3, 2, 4, 4] lowerCamelCase__ : List[Any] = [4, 3, 2, 3] lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Dict = 6 lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs.

    The XOR of a negative and a non-negative integer has its sign bit set.
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
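# Examples for different_signs above: the XOR trick avoids explicit
# comparisons and works for arbitrarily large Python ints.
assert different_signs(1, -1) is True
assert different_signs(-7, -5) is False
assert different_signs(10**30, -(10**30)) is True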
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase__ : Optional[Any] = { """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""", } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = 'open-llama' def __init__( self , SCREAMING_SNAKE_CASE_=10_00_00 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=1_10_08 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-6 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : str = vocab_size lowercase__ : Tuple = max_position_embeddings lowercase__ : Tuple = hidden_size lowercase__ : Tuple = intermediate_size lowercase__ : Any = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Any = hidden_act lowercase__ : Optional[int] = initializer_range lowercase__ : Union[str, Any] = rms_norm_eps lowercase__ : Optional[Any] = use_cache lowercase__ : Dict = kwargs.pop( """use_memorry_efficient_attention""" , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = hidden_dropout_prob lowercase__ : Optional[int] = attention_dropout_prob lowercase__ : Optional[int] = use_stable_embedding lowercase__ : Dict = shared_input_output_embedding lowercase__ : str = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_) or len(self.rope_scaling) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'got {self.rope_scaling}') lowercase__ : int = self.rope_scaling.get("""type""" , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.rope_scaling.get("""factor""" , SCREAMING_SNAKE_CASE_) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}') if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
lowerCamelCase__ : dict[tuple[int, int, int], int] = {} def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on lowercase__ : Tuple = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 ) lowercase__ : List[str] = state_late + state_absent + state_ontime lowercase__ : List[Any] = prizestrings return prizestrings def UpperCamelCase ( lowercase_ = 30 ) -> int: '''simple docstring''' return _calculate(lowercase_ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
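# --- Illustration (not part of the file above) --------------------------------
# An iterative cross-check for the memoised recursion: a forward DP over the
# same (total_absences, trailing_late_streak) state. Strings fail once they
# contain three consecutive late days or two absences in total.
def prize_strings_dp(days: int = 30) -> int:
    state = {(0, 0): 1}  # state[(absent, late)] = count of valid prefixes
    for _ in range(days):
        nxt: dict[tuple[int, int], int] = {}
        for (absent, late), count in state.items():
            nxt[(absent, 0)] = nxt.get((absent, 0), 0) + count            # on time
            if late < 2:                                                  # late
                nxt[(absent, late + 1)] = nxt.get((absent, late + 1), 0) + count
            if absent < 1:                                                # absent
                nxt[(absent + 1, 0)] = nxt.get((absent + 1, 0), 0) + count
        state = nxt
    return sum(state.values())

if __name__ == "__main__":
    assert prize_strings_dp(4) == 43   # value given in the Project Euler 191 statement
    print(prize_strings_dp(30))        # 1918080160, matching _calculate(30, 0, 0)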
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = tempfile.mkdtemp() # fmt: off lowercase__ : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowercase__ : Tuple = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_)))) lowercase__ : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowercase__ : Dict = {"""unk_token""": """<unk>"""} lowercase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } lowercase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.tmpdirname) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] lowercase__ : Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1)) for x in image_inputs] return image_inputs def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Any = self.get_rust_tokenizer() lowercase__ : Optional[Any] = self.get_image_processor() lowercase__ : List[str] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) processor_slow.save_pretrained(self.tmpdirname) lowercase__ : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) processor_fast.save_pretrained(self.tmpdirname) lowercase__ : int = 
CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) lowercase__ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") lowercase__ : Dict = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0) lowercase__ : List[Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.get_image_processor() lowercase__ : str = self.get_tokenizer() lowercase__ : Optional[Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.prepare_image_inputs() lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""np""") lowercase__ : Dict = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = self.get_image_processor() lowercase__ : List[Any] = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : str = """lower newer""" lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_image_processor() lowercase__ : List[Any] = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = """lower newer""" lowercase__ : Any = self.prepare_image_inputs() lowercase__ : Tuple = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_) self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """attention_mask""", """pixel_values"""]) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_): processor() def lowercase__ ( self): '''simple docstring''' lowercase__ : 
Union[str, Any] = self.get_image_processor() lowercase__ : Dict = self.get_tokenizer() lowercase__ : str = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : Union[str, Any] = processor.batch_decode(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_image_processor() lowercase__ : int = self.get_tokenizer() lowercase__ : Optional[int] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = """lower newer""" lowercase__ : Tuple = self.prepare_image_inputs() lowercase__ : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
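# --- Illustration (not part of the file above) --------------------------------
# The tests exercise one recurring pattern: a processor routes text to its
# tokenizer and images to its image processor, then merges the two encodings.
# `ToyProcessor` is a stand-in for that pattern, not the CLIPProcessor class.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding

toy = ToyProcessor(
    tokenizer=lambda t, **kw: {"input_ids": [[1, 2, 3]], "attention_mask": [[1, 1, 1]]},
    image_processor=lambda im, **kw: {"pixel_values": [[0.0]]},
)
assert sorted(toy(text="lower newer", images=[object()])) == [
    "attention_mask", "input_ids", "pixel_values",
]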
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class _snake_case ( nn.Module ): def __init__( self): '''simple docstring''' super().__init__() lowercase__ : Optional[Any] = nn.Linear(3 , 4) lowercase__ : Union[str, Any] = nn.BatchNormad(4) lowercase__ : str = nn.Linear(4 , 5) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_))) class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowercase__ , lowercase__ : int = mock_training_loop_function("""hello""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, """hello"""]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function(1_28 , """hello""" , """world""") self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0]) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): raise ValueError("""Oops, we had an error!""") with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0]) @require_cuda def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch.cuda.memory_allocated() lowercase__ : str = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = release_memory(SCREAMING_SNAKE_CASE_) 
self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_)
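# --- Illustration (not part of the file above) --------------------------------
# A simplified re-implementation of the behaviour under test: retry the wrapped
# function, halving the batch size on out-of-memory errors, until it succeeds
# or reaches zero. This is a sketch of the idea, not accelerate's code.
import functools

def find_executable_batch_size_sketch(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(
            find_executable_batch_size_sketch, starting_batch_size=starting_batch_size
        )

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise
    return wrapper

@find_executable_batch_size_sketch(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert train() == 8  # 128 -> 64 -> 32 -> 16 -> 8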
lowerCamelCase__ : Optional[Any] = """0.18.2""" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
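# --- Illustration (not part of the file above) --------------------------------
# Every try/except block in that __init__ follows the same guard pattern: probe
# for a backend and fall back to a placeholder export when it is missing, so
# that importing the package itself never fails. The same pattern in isolation
# (scipy/fftconvolve stand in here for an arbitrary optional backend):
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    """Signals that a guarded import block should fall back to placeholders."""

def is_scipy_available() -> bool:
    return importlib.util.find_spec("scipy") is not None

try:
    if not is_scipy_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    fftconvolve = None  # the real library exports dummy objects from dummy_* modules
else:
    from scipy.signal import fftconvolve

print("scipy backend available:", fftconvolve is not None)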
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = 
name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 
10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
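# --- Illustration (not part of the file above) --------------------------------
# The core tensor surgery in convert_state_dict() is splitting each fused `qkv`
# projection into separate query/key/value tensors, exactly as the slicing in
# the weight branch does. The same operation in isolation:
import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int):
    """Split a (3*dim, dim) fused projection into q, k, v of shape (dim, dim)."""
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    return q, k, v

fused = torch.randn(3 * 60, 60)  # embed_dim=60, as in the lightweight config
q, k, v = split_qkv(fused, 60)
assert q.shape == k.shape == v.shape == (60, 60)
assert torch.equal(torch.cat([q, k, v], dim=0), fused)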
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowerCamelCase__ : List[Any] = pytest.mark.integration lowerCamelCase__ : Dict = {"""comet"""} lowerCamelCase__ : Optional[int] = importlib.util.find_spec("""fairseq""") is not None lowerCamelCase__ : Tuple = {"""code_eval"""} lowerCamelCase__ : str = os.name == """nt""" lowerCamelCase__ : int = {"""bertscore""", """frugalscore""", """perplexity"""} lowerCamelCase__ : Tuple = importlib.util.find_spec("""transformers""") is not None def UpperCamelCase ( lowercase_ ) -> Optional[Any]: '''simple docstring''' @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , lowercase_ ) return wrapper def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , lowercase_ ) return wrapper def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' @wraps(lowercase_ ) def wrapper(self , lowercase_ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , lowercase_ ) return wrapper def UpperCamelCase ( ) -> List[str]: '''simple docstring''' lowercase__ : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @local class _snake_case ( parameterized.TestCase ): __lowerCAmelCase : Any = {} __lowerCAmelCase : str = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""") @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = """[...]""" lowercase__ : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path) lowercase__ : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE_) # check parameters lowercase__ : Any = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(SCREAMING_SNAKE_CASE_ , metric_module.__name__): with self.use_local_metrics(): try: lowercase__ : int = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @slow def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = """[...]""" lowercase__ : Union[str, Any] = importlib.import_module( 
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path) # run doctest with self.use_local_metrics(): lowercase__ : Union[str, Any] = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_) self.assertEqual(results.failed , 0) self.assertGreater(results.attempted , 1) @contextmanager def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE_): yield else: yield @contextmanager def lowercase__ ( self): '''simple docstring''' def load_local_metric(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): return load_metric(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_) , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) with patch("""datasets.load_metric""") as mock_load_metric: lowercase__ : Union[str, Any] = load_local_metric yield @classmethod def lowercase__ ( cls , SCREAMING_SNAKE_CASE_): '''simple docstring''' def wrapper(SCREAMING_SNAKE_CASE_): lowercase__ : Any = contextmanager(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class _snake_case ( UpperCAmelCase_ ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' assert len(input_dict["""input_ids"""]) == 2 return np.array([1.0_3, 1.0_4]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: lowercase__ : str = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' import torch def bert_cos_score_idf(lowercase_ , lowercase_ , *lowercase_ , **lowercase_ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: lowercase__ : int = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def UpperCamelCase ( lowercase_ ) -> Dict: '''simple docstring''' def load_from_checkpoint(lowercase_ ): class _snake_case : def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' assert len(SCREAMING_SNAKE_CASE_) == 2 lowercase__ : Optional[Any] = [0.1_9, 0.9_2] return scores, sum(SCREAMING_SNAKE_CASE_) / len(SCREAMING_SNAKE_CASE_) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: lowercase__ : Union[str, Any] = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: lowercase__ : List[Any] = load_from_checkpoint yield def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' lowercase__ : Optional[Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) 
lowercase__ : int = """ERROR""" lowercase__ : Tuple = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}' with pytest.raises(lowercase_ , match=re.escape(lowercase_ ) ): metric.compute(predictions=[] , references=[] , scheme=lowercase_ )
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": 
jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir 
is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... 
""") self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
lowerCamelCase__ : Dict = { """a""": """AAAAA""", """b""": """AAAAB""", """c""": """AAABA""", """d""": """AAABB""", """e""": """AABAA""", """f""": """AABAB""", """g""": """AABBA""", """h""": """AABBB""", """i""": """ABAAA""", """j""": """BBBAA""", """k""": """ABAAB""", """l""": """ABABA""", """m""": """ABABB""", """n""": """ABBAA""", """o""": """ABBAB""", """p""": """ABBBA""", """q""": """ABBBB""", """r""": """BAAAA""", """s""": """BAAAB""", """t""": """BAABA""", """u""": """BAABB""", """v""": """BBBAB""", """w""": """BABAA""", """x""": """BABAB""", """y""": """BABBA""", """z""": """BABBB""", """ """: """ """, } lowerCamelCase__ : int = {value: key for key, value in encode_dict.items()} def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' if set(lowercase_ ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) lowercase__ : str = """""" for word in coded.split(): while len(lowercase_ ) != 0: decoded += decode_dict[word[:5]] lowercase__ : Tuple = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
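# --- Illustration (not part of the file above) --------------------------------
# Round-trip sanity check for the tables above, using a small slice of the
# mapping. Note that this variant, unlike Bacon's classical 24-letter cipher,
# gives i/j and u/v their own codes.
sample = {"h": "AABBB", "e": "AABAA", "l": "ABABA", "o": "ABBAB"}
encoded = "".join(sample[c] for c in "hello")
assert encoded == "AABBBAABAAABABAABABAABBAB"
reverse = {v: k for k, v in sample.items()}
decoded = "".join(reverse[encoded[i : i + 5]] for i in range(0, len(encoded), 5))
assert decoded == "hello"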
lowerCamelCase__ : List[str] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase__ : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
import glob import os import random from string import ascii_lowercase, digits import cva lowerCamelCase__ : str = """""" lowerCamelCase__ : Tuple = """""" lowerCamelCase__ : int = """""" lowerCamelCase__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def UpperCamelCase ( ) -> None: '''simple docstring''' lowercase__ , lowercase__ : Union[str, Any] = get_dataset(lowercase_ , lowercase_ ) print("""Processing...""" ) lowercase__ , lowercase__ , lowercase__ : str = update_image_and_anno(lowercase_ , lowercase_ , lowercase_ ) for index, image in enumerate(lowercase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowercase__ : Any = random_chars(32 ) lowercase__ : Tuple = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] lowercase__ : List[str] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(F'/{file_root}.jpg' , lowercase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'Success {index+1}/{len(lowercase_ )} with {file_name}' ) lowercase__ : Dict = [] for anno in new_annos[index]: lowercase__ : List[str] = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(lowercase_ ) with open(F'/{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> tuple[list, list]: '''simple docstring''' lowercase__ : List[Any] = [] lowercase__ : List[Any] = [] for label_file in glob.glob(os.path.join(lowercase_ , """*.txt""" ) ): lowercase__ : Union[str, Any] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowercase_ ) as in_file: lowercase__ : Any = in_file.readlines() lowercase__ : List[str] = os.path.join(lowercase_ , F'{label_name}.jpg' ) lowercase__ : Tuple = [] for obj_list in obj_lists: lowercase__ : Tuple = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowercase_ ) labels.append(lowercase_ ) return img_paths, labels def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ = 1 ) -> tuple[list, list, list]: '''simple docstring''' lowercase__ : Optional[int] = [] lowercase__ : Dict = [] lowercase__ : Optional[int] = [] for idx in range(len(lowercase_ ) ): lowercase__ : Tuple = [] lowercase__ : Optional[Any] = img_list[idx] path_list.append(lowercase_ ) lowercase__ : List[Any] = anno_list[idx] lowercase__ : Union[str, Any] = cva.imread(lowercase_ ) if flip_type == 1: lowercase__ : int = cva.flip(lowercase_ , lowercase_ ) for bbox in img_annos: lowercase__ : Any = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: lowercase__ : Any = cva.flip(lowercase_ , lowercase_ ) for bbox in img_annos: lowercase__ : List[str] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowercase_ ) new_imgs_list.append(lowercase_ ) return new_imgs_list, new_annos_lists, path_list def UpperCamelCase ( lowercase_ = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" lowercase__ : Dict = ascii_lowercase + digits return "".join(random.choice(lowercase_ ) for _ in range(lowercase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
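# --- Illustration (not part of the file above) --------------------------------
# update_image_and_anno() relies on one identity for YOLO-style boxes with
# normalised centres: a horizontal flip maps x_center to 1 - x_center, and a
# vertical flip maps y_center to 1 - y_center; width and height are unchanged.
def flip_bbox(bbox, flip_type=1):
    """bbox = [class_id, x_center, y_center, width, height], coords in [0, 1]."""
    cls, x, y, w, h = bbox
    if flip_type == 1:                 # horizontal (cv2.flip with flag 1)
        return [cls, 1 - x, y, w, h]
    return [cls, x, 1 - y, w, h]       # vertical (cv2.flip with flag 0)

assert flip_bbox([0, 0.25, 0.5, 0.2, 0.4], flip_type=1) == [0, 0.75, 0.5, 0.2, 0.4]
assert flip_bbox([0, 0.5, 0.25, 0.2, 0.4], flip_type=0) == [0, 0.5, 0.75, 0.2, 0.4]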
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class _snake_case:
    def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=14, SCREAMING_SNAKE_CASE_=7,
                 SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False,
                 SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=32,
                 SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=4,
                 SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1,
                 SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=5_12, SCREAMING_SNAKE_CASE_=0.0_2):
        '''simple docstring'''
        lowercase__ : str = parent
        lowercase__ : Optional[int] = batch_size
        lowercase__ : Optional[int] = seq_length
        lowercase__ : Union[str, Any] = is_training
        lowercase__ : Any = use_input_mask
        lowercase__ : Optional[int] = use_token_type_ids
        lowercase__ : Optional[Any] = use_labels
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : Any = rotary_dim
        lowercase__ : Optional[Any] = num_hidden_layers
        lowercase__ : Tuple = num_attention_heads
        lowercase__ : Tuple = intermediate_size
        lowercase__ : List[str] = hidden_act
        lowercase__ : Optional[Any] = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : Optional[int] = initializer_range
        lowercase__ : Optional[int] = None
        lowercase__ : str = vocab_size - 1
        lowercase__ : Any = vocab_size - 1
        lowercase__ : Dict = vocab_size - 1

    def lowercase__(self):
        '''simple docstring'''
        lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lowercase__ : Any = None
        if self.use_input_mask:
            lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length])
        lowercase__ : List[Any] = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=SCREAMING_SNAKE_CASE_,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)

    def lowercase__(self):
        '''simple docstring'''
        lowercase__ : Optional[int] = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
        lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def lowercase__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        lowercase__ : Tuple = 20
        lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0], SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="""i4""")
        lowercase__ : Tuple = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        lowercase__ : List[str] = model(
            input_ids[:, :-1],
            attention_mask=SCREAMING_SNAKE_CASE_,
            past_key_values=SCREAMING_SNAKE_CASE_,
            position_ids=SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""")
        lowercase__ : str = model(
            input_ids[:, -1:],
            attention_mask=SCREAMING_SNAKE_CASE_,
            past_key_values=outputs_cache.past_key_values,
            position_ids=SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')

    def lowercase__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        lowercase__ : Union[str, Any] = 20
        lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        lowercase__ : Dict = model.init_cache(input_ids.shape[0], SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        lowercase__ : Any = model(
            input_ids[:, :-1],
            attention_mask=SCREAMING_SNAKE_CASE_,
            past_key_values=SCREAMING_SNAKE_CASE_,
            position_ids=SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""")
        lowercase__ : Tuple = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=SCREAMING_SNAKE_CASE_,
            position_ids=SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : str = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_)
        lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'Max diff is {diff}')


@require_flax
class _snake_case(UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase):
    __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowercase__(self):
        '''simple docstring'''
        lowercase__ : List[str] = FlaxGPTJModelTester(self)

    def lowercase__(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_
            )

    def lowercase__(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_
            )

    @tooslow
    def lowercase__(self):
        '''simple docstring'''
        lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""", pad_token="""<|endoftext|>""", padding_side="""left""")
        lowercase__ : List[str] = tokenizer(
            ["""Hello this is a long string""", """Hey"""],
            return_tensors="""np""",
            padding=SCREAMING_SNAKE_CASE_,
            truncation=SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
        lowercase__ : Optional[Any] = False
        lowercase__ : List[str] = model.config.eos_token_id
        lowercase__ : List[Any] = jax.jit(model.generate)
        lowercase__ : Tuple = jit_generate(
            inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], pad_token_id=tokenizer.pad_token_id
        ).sequences
        lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_, skip_special_tokens=SCREAMING_SNAKE_CASE_)
        lowercase__ : Tuple = [
            """Hello this is a long string of text.\n\nI'm trying to get the text of the""",
            """Hey, I'm a little late to the party. I'm going to""",
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)

    @is_pt_flax_cross_test
    def lowercase__(self):
        '''simple docstring'''
        lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
                lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                lowercase__ : int = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)

                lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape
                lowercase__ : int = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
                    lowercase__ : str = 0
                    lowercase__ : List[Any] = 1
                    lowercase__ : Dict = 0
                    lowercase__ : Any = 1
                lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_, dtype=jnp.floataa)
                lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), SCREAMING_SNAKE_CASE_)
                lowercase__ : List[Any] = fx_state

                with torch.no_grad():
                    lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_), """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(SCREAMING_SNAKE_CASE_)
                    lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_, from_pt=SCREAMING_SNAKE_CASE_)
                lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(
                    len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_), """Output lengths differ between Flax and PyTorch"""
                )
                for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2)

    @is_pt_flax_cross_test
    def lowercase__(self):
        '''simple docstring'''
        lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
                lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                lowercase__ : int = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)

                lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval()
                lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_, dtype=jnp.floataa)
                lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_, fx_model.params)
                lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape
                lowercase__ : List[Any] = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
                    lowercase__ : Tuple = 0
                    lowercase__ : int = 1
                    lowercase__ : str = 0
                    lowercase__ : str = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_), """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(SCREAMING_SNAKE_CASE_)
                    lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_, from_flax=SCREAMING_SNAKE_CASE_)
                with torch.no_grad():
                    lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple()
                self.assertEqual(
                    len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_), """Output lengths differ between Flax and PyTorch"""
                )
                for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)

    @tooslow
    def lowercase__(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
            lowercase__ : int = model(np.ones((1, 1)))
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
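# For orientation -- a minimal standalone sketch (not part of the test file) that mirrors the
# tester's tiny configuration above, assuming `flax` and `transformers` are installed:

import numpy as np
from transformers import FlaxGPTJModel, GPTJConfig

# tiny config matching the tester defaults above (hidden_size=32, 4 layers/heads, rotary_dim=4)
config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=4, n_head=4, rotary_dim=4, n_positions=512)
model = FlaxGPTJModel(config)
outputs = model(np.ones((1, 7), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 7, 32)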
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")

lowerCamelCase__ : str = logging.getLogger(__name__)


@dataclass
class _snake_case:
    __lowerCAmelCase : Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    __lowerCAmelCase : bool = field(
        default=UpperCAmelCase_, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    __lowerCAmelCase : bool = field(
        default=UpperCAmelCase_,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    __lowerCAmelCase : Optional[int] = field(
        default=UpperCAmelCase_,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    __lowerCAmelCase : Optional[int] = field(
        default=UpperCAmelCase_,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    __lowerCAmelCase : Optional[int] = field(
        default=UpperCAmelCase_,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )


@dataclass
class _snake_case:
    __lowerCAmelCase : str = field(
        default=UpperCAmelCase_, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    __lowerCAmelCase : str = field(
        default=UpperCAmelCase_,
        metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'},
    )
    __lowerCAmelCase : Optional[str] = field(
        default=UpperCAmelCase_, metadata={'help': 'Train language if it is different from the evaluation language.'}
    )
    __lowerCAmelCase : Optional[str] = field(
        default=UpperCAmelCase_, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    __lowerCAmelCase : Optional[str] = field(
        default=UpperCAmelCase_, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    __lowerCAmelCase : Optional[str] = field(
        default=UpperCAmelCase_,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    __lowerCAmelCase : Optional[bool] = field(
        default=UpperCAmelCase_,
        metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'},
    )
    __lowerCAmelCase : bool = field(
        default=UpperCAmelCase_,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    __lowerCAmelCase : str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    __lowerCAmelCase : bool = field(
        default=UpperCAmelCase_,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    __lowerCAmelCase : bool = field(
        default=UpperCAmelCase_,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )


def UpperCamelCase() -> Union[str, Any]:
    '''simple docstring'''
    lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    lowercase__ , lowercase__ , lowercase__ : Optional[int] = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_xnli""", lowercase_)

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowercase__ : int = training_args.get_process_log_level()
    logger.setLevel(lowercase_)
    datasets.utils.logging.set_verbosity(lowercase_)
    transformers.utils.logging.set_verbosity(lowercase_)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}'
    )
    logger.info(F'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    lowercase__ : List[Any] = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase__ : Any = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome."""
            )
        elif last_checkpoint is not None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch."""
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            lowercase__ : Union[str, Any] = load_dataset(
                """xnli""",
                model_args.language,
                split="""train""",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            lowercase__ : Dict = load_dataset(
                """xnli""",
                model_args.train_language,
                split="""train""",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        lowercase__ : List[str] = train_dataset.features["""label"""].names

    if training_args.do_eval:
        lowercase__ : Optional[Any] = load_dataset(
            """xnli""",
            model_args.language,
            split="""validation""",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        lowercase__ : Tuple = eval_dataset.features["""label"""].names

    if training_args.do_predict:
        lowercase__ : List[Any] = load_dataset(
            """xnli""",
            model_args.language,
            split="""test""",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        lowercase__ : Tuple = predict_dataset.features["""label"""].names

    # Labels
    lowercase__ : List[str] = len(lowercase_)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase__ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=lowercase_,
        idalabel={str(lowercase_): label for i, label in enumerate(lowercase_)},
        labelaid={label: i for i, label in enumerate(lowercase_)},
        finetuning_task="""xnli""",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    lowercase__ : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=lowercase_,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        lowercase__ : str = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowercase__ : Any = False

    def preprocess_function(lowercase_):
        # Tokenize the texts
        return tokenizer(
            examples["""premise"""],
            examples["""hypothesis"""],
            padding=lowercase_,
            max_length=data_args.max_seq_length,
            truncation=lowercase_,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            lowercase__ : int = min(len(lowercase_), data_args.max_train_samples)
            lowercase__ : Optional[Any] = train_dataset.select(range(lowercase_))
        with training_args.main_process_first(desc="""train dataset map pre-processing"""):
            lowercase__ : Any = train_dataset.map(
                lowercase_,
                batched=lowercase_,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on train dataset""",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(lowercase_)), 3):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.')

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            lowercase__ : int = min(len(lowercase_), data_args.max_eval_samples)
            lowercase__ : Union[str, Any] = eval_dataset.select(range(lowercase_))
        with training_args.main_process_first(desc="""validation dataset map pre-processing"""):
            lowercase__ : Dict = eval_dataset.map(
                lowercase_,
                batched=lowercase_,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on validation dataset""",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            lowercase__ : int = min(len(lowercase_), data_args.max_predict_samples)
            lowercase__ : Optional[int] = predict_dataset.select(range(lowercase_))
        with training_args.main_process_first(desc="""prediction dataset map pre-processing"""):
            lowercase__ : List[Any] = predict_dataset.map(
                lowercase_,
                batched=lowercase_,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on prediction dataset""",
            )

    # Get the metric function
    lowercase__ : Union[str, Any] = evaluate.load("""xnli""")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowercase_):
        lowercase__ : str = p.predictions[0] if isinstance(p.predictions, lowercase_) else p.predictions
        lowercase__ : Optional[int] = np.argmax(lowercase_, axis=1)
        return metric.compute(predictions=lowercase_, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowercase__ : List[str] = default_data_collator
    elif training_args.fpaa:
        lowercase__ : Any = DataCollatorWithPadding(lowercase_, pad_to_multiple_of=8)
    else:
        lowercase__ : Optional[Any] = None

    # Initialize our Trainer
    lowercase__ : List[Any] = Trainer(
        model=lowercase_,
        args=lowercase_,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=lowercase_,
        tokenizer=lowercase_,
        data_collator=lowercase_,
    )

    # Training
    if training_args.do_train:
        lowercase__ : str = None
        if training_args.resume_from_checkpoint is not None:
            lowercase__ : Tuple = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase__ : Optional[int] = last_checkpoint
        lowercase__ : str = trainer.train(resume_from_checkpoint=lowercase_)
        lowercase__ : Union[str, Any] = train_result.metrics
        lowercase__ : Optional[Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_)
        )
        lowercase__ : str = min(lowercase_, len(lowercase_))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""", lowercase_)
        trainer.save_metrics("""train""", lowercase_)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        lowercase__ : Union[str, Any] = trainer.evaluate(eval_dataset=lowercase_)
        lowercase__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_)
        lowercase__ : int = min(lowercase_, len(lowercase_))
        trainer.log_metrics("""eval""", lowercase_)
        trainer.save_metrics("""eval""", lowercase_)

    # Prediction
    if training_args.do_predict:
        logger.info("""*** Predict ***""")
        lowercase__ , lowercase__ , lowercase__ : Optional[int] = trainer.predict(lowercase_, metric_key_prefix="""predict""")
        lowercase__ : str = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowercase_)
        )
        lowercase__ : Tuple = min(lowercase_, len(lowercase_))
        trainer.log_metrics("""predict""", lowercase_)
        trainer.save_metrics("""predict""", lowercase_)

        lowercase__ : str = np.argmax(lowercase_, axis=1)
        lowercase__ : Any = os.path.join(training_args.output_dir, """predictions.txt""")
        if trainer.is_world_process_zero():
            with open(lowercase_, """w""") as writer:
                writer.write("""index\tprediction\n""")
                for index, item in enumerate(lowercase_):
                    lowercase__ : Any = label_list[item]
                    writer.write(F'{index}\t{item}\n')


if __name__ == "__main__":
    main()
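# A hypothetical invocation, sketched from the argument dataclasses above; the model id and
# output path are placeholders, not values taken from this file:
#
# python run_xnli.py \
#   --model_name_or_path bert-base-multilingual-cased \
#   --language de --train_language en \
#   --do_train --do_eval \
#   --output_dir /tmp/debug_xnli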
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case(UpperCAmelCase_):
    __lowerCAmelCase : Any = ['image_processor', 'tokenizer']
    __lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor'
    __lowerCAmelCase : int = 'AutoTokenizer'

    def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        super().__init__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        lowercase__ : Union[str, Any] = self.image_processor

    def __call__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)

        if images is not None:
            lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)

        if text is not None and images is not None:
            lowercase__ : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_), tensor_type=SCREAMING_SNAKE_CASE_)

    def lowercase__(self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)

    def lowercase__(self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)

    @property
    def lowercase__(self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
import itertools
import string
from collections.abc import Generator, Iterable


def UpperCamelCase(lowercase_, lowercase_) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    lowercase__ : Any = iter(lowercase_)
    while True:
        lowercase__ : Any = tuple(itertools.islice(lowercase_, lowercase_))
        if not chunk:
            return
        yield chunk


def UpperCamelCase(lowercase_) -> str:
    '''simple docstring'''
    lowercase__ : Union[str, Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters])
    lowercase__ : Union[str, Any] = """"""

    if len(lowercase_) < 2:
        return dirty

    for i in range(len(lowercase_) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]

    if len(lowercase_) & 1:
        clean += "X"

    return clean


def UpperCamelCase(lowercase_) -> list[str]:
    '''simple docstring'''
    lowercase__ : Optional[Any] = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    lowercase__ : List[Any] = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(lowercase_)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(lowercase_)

    return table


def UpperCamelCase(lowercase_, lowercase_) -> str:
    '''simple docstring'''
    lowercase__ : str = generate_table(lowercase_)
    lowercase__ : Optional[int] = prepare_input(lowercase_)
    lowercase__ : Optional[int] = """"""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, chara in chunker(lowercase_, 2):
        lowercase__ , lowercase__ : Dict = divmod(table.index(lowercase_), 5)
        lowercase__ , lowercase__ : Optional[int] = divmod(table.index(lowercase_), 5)

        if rowa == rowa:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
        elif cola == cola:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
        else:  # rectangle
            ciphertext += table[rowa * 5 + cola]
            ciphertext += table[rowa * 5 + cola]

    return ciphertext


def UpperCamelCase(lowercase_, lowercase_) -> str:
    '''simple docstring'''
    lowercase__ : int = generate_table(lowercase_)
    lowercase__ : Tuple = """"""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, chara in chunker(lowercase_, 2):
        lowercase__ , lowercase__ : Optional[int] = divmod(table.index(lowercase_), 5)
        lowercase__ , lowercase__ : int = divmod(table.index(lowercase_), 5)

        if rowa == rowa:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowa * 5 + (cola - 1) % 5]
        elif cola == cola:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
        else:  # rectangle
            plaintext += table[rowa * 5 + cola]
            plaintext += table[rowa * 5 + cola]

    return plaintext
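# A minimal round-trip sketch for the Playfair helpers above, assuming the original function
# names `encode`/`decode` (the dump renames every function to `UpperCamelCase`, so the later
# definitions shadow the earlier ones as written):

key = "monarchy"
ciphertext = encode("Hide the gold", key)
assert decode(ciphertext, key).startswith("HIDETHEGOLD")  # input is uppercased and padded to even length with 'X'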
def UpperCamelCase(lowercase_) -> int:
    '''simple docstring'''
    if n == 1 or not isinstance(lowercase_, lowercase_):
        return 0
    elif n == 2:
        return 1
    else:
        lowercase__ : List[Any] = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def UpperCamelCase(lowercase_) -> int:
    '''simple docstring'''
    lowercase__ : Optional[Any] = 0
    lowercase__ : Dict = 2

    while digits < n:
        index += 1
        lowercase__ : str = len(str(fibonacci(lowercase_)))

    return index


def UpperCamelCase(lowercase_=10_00) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(lowercase_)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
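# To make the intent concrete -- a minimal check assuming the original names `fibonacci` and
# `fibonacci_digits_index` (again collapsed to `UpperCamelCase` in the dump):

# F(12) = 144 is the first Fibonacci number with 3 digits, so the index search returns 12
assert len(str(fibonacci(12))) == 3
assert fibonacci_digits_index(3) == 12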
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


lowerCamelCase__ : Tuple = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith
            and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

lowerCamelCase__ : Union[str, Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

lowerCamelCase__ : Optional[int] = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    def lowercase__(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Sequence(datasets.Value("""string""", id="""token"""), id="""sequence"""),
                    """references""": datasets.Sequence(
                        datasets.Sequence(datasets.Value("""string""", id="""token"""), id="""sequence"""), id="""references"""
                    ),
                }
            ),
        )

    def lowercase__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=4):
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=SCREAMING_SNAKE_CASE_,
                hypotheses=SCREAMING_SNAKE_CASE_,
                min_len=SCREAMING_SNAKE_CASE_,
                max_len=SCREAMING_SNAKE_CASE_,
            )
        }
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set."""


def UpperCamelCase(lowercase_="no", lowercase_=default_json_config_file, lowercase_=False) -> Any:
    '''simple docstring'''
    lowercase__ : Any = Path(lowercase_)
    path.parent.mkdir(parents=lowercase_, exist_ok=lowercase_)
    if path.exists():
        print(
            F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.'
        )
        return False

    lowercase__ : int = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}'
        )

    lowercase__ : Dict = {
        """compute_environment""": """LOCAL_MACHINE""",
        """mixed_precision""": mixed_precision,
    }

    if torch.cuda.is_available():
        lowercase__ : Any = torch.cuda.device_count()
        lowercase__ : Any = num_gpus
        lowercase__ : Optional[int] = False
        if num_gpus > 1:
            lowercase__ : Tuple = """MULTI_GPU"""
        else:
            lowercase__ : Optional[Any] = """NO"""
    elif is_xpu_available() and use_xpu:
        lowercase__ : Union[str, Any] = torch.xpu.device_count()
        lowercase__ : str = num_xpus
        lowercase__ : List[Any] = False
        if num_xpus > 1:
            lowercase__ : str = """MULTI_XPU"""
        else:
            lowercase__ : Optional[Any] = """NO"""
    elif is_npu_available():
        lowercase__ : Tuple = torch.npu.device_count()
        lowercase__ : Union[str, Any] = num_npus
        lowercase__ : Union[str, Any] = False
        if num_npus > 1:
            lowercase__ : List[Any] = """MULTI_NPU"""
        else:
            lowercase__ : int = """NO"""
    else:
        lowercase__ : Union[str, Any] = 0
        lowercase__ : str = True
        lowercase__ : Union[str, Any] = 1
        lowercase__ : int = """NO"""

    lowercase__ : Tuple = ClusterConfig(**lowercase_)
    config.to_json_file(lowercase_)
    return path


def UpperCamelCase(lowercase_, lowercase_) -> Optional[Any]:
    '''simple docstring'''
    lowercase__ : List[str] = parser.add_parser("""default""", parents=lowercase_, help=lowercase_, formatter_class=lowercase_)
    parser.add_argument(
        """--config_file""",
        default=lowercase_,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ),
        dest="""save_location""",
    )
    parser.add_argument(
        """--mixed_precision""",
        choices=["""no""", """fp16""", """bf16"""],
        type=lowercase_,
        help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""",
        default="""no""",
    )
    parser.set_defaults(func=lowercase_)
    return parser


def UpperCamelCase(lowercase_) -> Any:
    '''simple docstring'''
    lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(F'accelerate configuration saved at {config_file}')
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCamelCase__ : List[Any] = logging.get_logger(__name__)

lowerCamelCase__ : Union[str, Any] = {
    """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
    """YituTech/conv-bert-medium-small""": (
        """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
    ),
    """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class _snake_case(UpperCAmelCase_):
    __lowerCAmelCase : Union[str, Any] = 'convbert'

    def __init__(self, SCREAMING_SNAKE_CASE_=3_05_22, SCREAMING_SNAKE_CASE_=7_68, SCREAMING_SNAKE_CASE_=12,
                 SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=30_72, SCREAMING_SNAKE_CASE_="gelu",
                 SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=5_12,
                 SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12,
                 SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2,
                 SCREAMING_SNAKE_CASE_=7_68, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=9,
                 SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        super().__init__(
            pad_token_id=SCREAMING_SNAKE_CASE_,
            bos_token_id=SCREAMING_SNAKE_CASE_,
            eos_token_id=SCREAMING_SNAKE_CASE_,
            **SCREAMING_SNAKE_CASE_,
        )
        lowercase__ : Dict = vocab_size
        lowercase__ : List[Any] = hidden_size
        lowercase__ : Optional[Any] = num_hidden_layers
        lowercase__ : Union[str, Any] = num_attention_heads
        lowercase__ : List[str] = intermediate_size
        lowercase__ : Optional[int] = hidden_act
        lowercase__ : Tuple = hidden_dropout_prob
        lowercase__ : List[str] = attention_probs_dropout_prob
        lowercase__ : Tuple = max_position_embeddings
        lowercase__ : Dict = type_vocab_size
        lowercase__ : Union[str, Any] = initializer_range
        lowercase__ : Dict = layer_norm_eps
        lowercase__ : Tuple = embedding_size
        lowercase__ : List[str] = head_ratio
        lowercase__ : Dict = conv_kernel_size
        lowercase__ : Dict = num_groups
        lowercase__ : int = classifier_dropout


class _snake_case(UpperCAmelCase_):
    @property
    def lowercase__(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            lowercase__ : str = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ]
        )
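# For reference, the defaults above can be inspected through the public class -- a small
# sketch assuming `transformers` is installed:

from transformers import ConvBertConfig

config = ConvBertConfig()  # defaults mirror YituTech/conv-bert-base
print(config.conv_kernel_size, config.head_ratio, config.num_groups)  # 9 2 1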
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

lowerCamelCase__ : str = logging.get_logger(__name__)

lowerCamelCase__ : Dict = {
    """openai/imagegpt-small""": """""",
    """openai/imagegpt-medium""": """""",
    """openai/imagegpt-large""": """""",
}


class _snake_case(UpperCAmelCase_):
    __lowerCAmelCase : Tuple = 'imagegpt'
    __lowerCAmelCase : Tuple = ['past_key_values']
    __lowerCAmelCase : str = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, SCREAMING_SNAKE_CASE_=5_12 + 1, SCREAMING_SNAKE_CASE_=32 * 32, SCREAMING_SNAKE_CASE_=5_12,
                 SCREAMING_SNAKE_CASE_=24, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=None,
                 SCREAMING_SNAKE_CASE_="quick_gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1,
                 SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1E-5, SCREAMING_SNAKE_CASE_=0.0_2,
                 SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False,
                 SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_):
        '''simple docstring'''
        lowercase__ : Any = vocab_size
        lowercase__ : Optional[Any] = n_positions
        lowercase__ : List[str] = n_embd
        lowercase__ : Tuple = n_layer
        lowercase__ : Optional[Any] = n_head
        lowercase__ : List[str] = n_inner
        lowercase__ : Union[str, Any] = activation_function
        lowercase__ : Union[str, Any] = resid_pdrop
        lowercase__ : Optional[int] = embd_pdrop
        lowercase__ : int = attn_pdrop
        lowercase__ : str = layer_norm_epsilon
        lowercase__ : Optional[Any] = initializer_range
        lowercase__ : Any = scale_attn_weights
        lowercase__ : str = use_cache
        lowercase__ : str = scale_attn_by_inverse_layer_idx
        lowercase__ : Optional[int] = reorder_and_upcast_attn
        lowercase__ : Union[str, Any] = tie_word_embeddings
        super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_)


class _snake_case(UpperCAmelCase_):
    @property
    def lowercase__(self):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ]
        )

    def lowercase__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=-1,
                    SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=3,
                    SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=32):
        '''simple docstring'''
        lowercase__ : List[Any] = self._generate_dummy_images(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = dict(preprocessor(images=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_))
        return inputs
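# The defaults above are easiest to read off the public class -- a small sketch assuming
# `transformers` is installed:

from transformers import ImageGPTConfig

config = ImageGPTConfig()
print(config.vocab_size)   # 513: 512 colour clusters + 1 start-of-sequence token
print(config.n_positions)  # 1024: 32 * 32 pixels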
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)


class _snake_case(folder_based_builder.FolderBasedBuilderConfig):
    __lowerCAmelCase : bool = None
    __lowerCAmelCase : bool = None


class _snake_case(folder_based_builder.FolderBasedBuilder):
    __lowerCAmelCase : Optional[Any] = datasets.Audio()
    __lowerCAmelCase : Union[str, Any] = 'audio'
    __lowerCAmelCase : str = AudioFolderConfig
    __lowerCAmelCase : List[str]  # definition at the bottom of the script
    __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio', label_column='label')


lowerCamelCase__ : int = [
    """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""",
    """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""",
    """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""",
]
lowerCamelCase__ : int = AUDIO_EXTENSIONS
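# In practice this builder is what backs the `audiofolder` loader -- a minimal usage sketch,
# with the directory path as a placeholder:

from datasets import load_dataset

# expects a <data_dir>/<label>/<clip>.wav style layout; any extension in AUDIO_EXTENSIONS works
ds = load_dataset("audiofolder", data_dir="/path/to/folder")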
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"


def rewrite_dict_keys(d):
    # strip the word-breaking symbol, add a word-ending symbol where the word is not broken up
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da


def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import tempfile
import unittest

from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel


class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here"
        " will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be"
        " merged"
    )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
def UpperCamelCase(nums) -> float:
    """Mean absolute deviation: the average distance of the values from their mean."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
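A quick sanity check for the helper above; this is an illustrative sketch (the input list and the hand-worked expected value are mine, not from the source):

```python
# Mean absolute deviation of [2, 4, 6, 8]:
# average = (2 + 4 + 6 + 8) / 4 = 5
# deviations = |2-5|, |4-5|, |6-5|, |8-5| = 3, 1, 1, 3
# MAD = (3 + 1 + 1 + 3) / 4 = 2.0
print(UpperCamelCase([2, 4, 6, 8]))  # 2.0
```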
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
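One detail worth noting in the `pad` method above: `(old // size + 1) * size - old` always pads to the *next* multiple of `size`, so a dimension that is already aligned still gains a full extra block. A small standalone sketch of that arithmetic, assuming nothing beyond the formula shown above:

```python
def pad_amount(old: int, size: int = 8) -> int:
    # Same formula as the pad() method above.
    return (old // size + 1) * size - old


print(pad_amount(13))  # 3 -> 13 + 3 = 16, a multiple of 8
print(pad_amount(16))  # 8 -> an already-aligned dimension is still padded (16 -> 24)
```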
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load the base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
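The scan above recomputes each 13-digit product from scratch, which is simple and fast enough at this input size. For illustration only, the same window scan written with `math.prod`; the function name and the short demo string are mine, not from the source:

```python
import math


def largest_window_product(digits: str, width: int) -> int:
    # Slide a fixed-width window over the digit string and keep the best product.
    return max(
        math.prod(int(c) for c in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )


print(largest_window_product("3675356291", 4))  # 630, from the window "3675" (3 * 6 * 7 * 5)
```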
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
12
1
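The tests above exercise the Trainer callback machinery. As a minimal sketch of the interface they depend on, here is a custom callback that records the events it sees; EventRecorderCallback is a hypothetical illustration (not part of transformers), but the hook names and signatures follow the documented TrainerCallback API.

from transformers import TrainerCallback


class EventRecorderCallback(TrainerCallback):
    """Records the name of every trainer event it observes."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")


# Trainer accepts callbacks as classes or as instances, which is what the
# add/pop/remove test above verifies:
#   trainer = Trainer(model, args, callbacks=[EventRecorderCallback()])
#   trainer.add_callback(EventRecorderCallback)     # by class
#   trainer.pop_callback(EventRecorderCallback)     # by class, returns the instance
#   trainer.remove_callback(trainer.callback_handler.callbacks[0])  # by instance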
from __future__ import annotations

from typing import TypedDict


class _snake_case ( UpperCAmelCase_ ):
    __lowerCAmelCase : str
    __lowerCAmelCase : int


def UpperCamelCase ( lowercase_ ) -> list[str]:
    '''simple docstring'''
    if not isinstance(lowercase_ , lowercase_ ):
        raise TypeError("""The parameter s type must be str.""" )

    return [s[i:] + s[:i] for i in range(len(lowercase_ ) )]


def UpperCamelCase ( lowercase_ ) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(lowercase_ , lowercase_ ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )

    lowercase__ : List[str] = all_rotations(lowercase_ )
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    lowercase__ : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(lowercase_ ),
    }
    return response


def UpperCamelCase ( lowercase_ , lowercase_ ) -> str:
    '''simple docstring'''
    if not isinstance(lowercase_ , lowercase_ ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        lowercase__ : Optional[Any] = int(lowercase_ )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(lowercase_ ):
        raise ValueError(
            """The parameter idx_original_string must be lower than"""
            """ len(bwt_string).""" )

    lowercase__ : str = [""""""] * len(lowercase_ )
    for _ in range(len(lowercase_ ) ):
        for i in range(len(lowercase_ ) ):
            lowercase__ : List[Any] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    lowerCamelCase__ : Tuple = """Provide a string that I will generate its BWT transform: """
    lowerCamelCase__ : Dict = input(entry_msg).strip()
    lowerCamelCase__ : int = bwt_transform(s)
    print(
        f'''Burrows Wheeler transform for string \'{s}\' results '''
        f'''in \'{result["bwt_string"]}\''''
    )
    lowerCamelCase__ : List[str] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
        f'''we get original string \'{original_string}\''''
    )
12
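A worked round-trip for the Burrows-Wheeler code above, inlining the same steps with plain names instead of the mangled ones:

s = "banana"
rotations = sorted(s[i:] + s[:i] for i in range(len(s)))  # all_rotations, sorted
bwt_string = "".join(word[-1] for word in rotations)
idx = rotations.index(s)
print(bwt_string, idx)  # nnbaaa 3

# inverse: prepend the BWT column and re-sort, len(s) times
ordered = [""] * len(bwt_string)
for _ in range(len(bwt_string)):
    ordered = sorted(bwt_string[i] + ordered[i] for i in range(len(bwt_string)))
print(ordered[idx])  # banana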
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = RoCBertTokenizer __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = filter_non_english def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowercase__ : Dict = {} lowercase__ : Tuple = {} for i, value in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = i lowercase__ : Any = i lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""]) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""") , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowercase__ : Optional[int] = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = i lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) if self.test_rust_tokenizer: lowercase__ : int = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
lowercase__ : List[str] = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False lowercase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ["""的""", """人""", """有"""] lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Union[str, Any] = True lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = False lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that only the first Chinese character is not preceded by "##". 
lowercase__ : Any = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): lowercase__ : Optional[int] = """你好,你是谁""" lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.prepare_for_model( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
12
1
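The setUp above builds a RoCBert tokenizer from three files: a plain vocab plus two JSON maps for character shape and pronunciation ids. A hedged sketch of that construction outside the test harness; the file names here are hypothetical placeholders.

import json

vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁"]
with open("vocab.txt", "w", encoding="utf-8") as f:
    f.write("".join(t + "\n" for t in vocab_tokens))

# both auxiliary files map token -> integer id, exactly as in the test's setUp
token_to_id = {t: i for i, t in enumerate(vocab_tokens)}
for path in ("word_shape.json", "word_pronunciation.json"):
    with open(path, "w", encoding="utf-8") as f:
        json.dump(token_to_id, f, ensure_ascii=False)

# from transformers import RoCBertTokenizer
# tok = RoCBertTokenizer("vocab.txt", "word_shape.json", "word_pronunciation.json")
# tok.tokenize("你好[SEP]你是谁")  # -> ["你", "好", "[SEP]", "你", "是", "谁"], per the first test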
from sklearn.metrics import mean_squared_error

import datasets


lowerCamelCase__ : Tuple = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

lowerCamelCase__ : int = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values.
"""

lowerCamelCase__ : Dict = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        \"raw_values\" : Returns a full set of errors in case of multioutput input.
        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric(\"mse\")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):

    def lowercase__ ( self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(self._get_feature_types()) ,
            reference_urls=[
                """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
            ] ,
        )

    def lowercase__ ( self):
        '''simple docstring'''
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("""float""")),
                "references": datasets.Sequence(datasets.Value("""float""")),
            }
        else:
            return {
                "predictions": datasets.Value("""float"""),
                "references": datasets.Value("""float"""),
            }

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="uniform_average" , SCREAMING_SNAKE_CASE_=True):
        '''simple docstring'''
        lowercase__ : Dict = mean_squared_error(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ , multioutput=SCREAMING_SNAKE_CASE_ , squared=SCREAMING_SNAKE_CASE_)

        return {"mse": mse}
12
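The numbers in the docstring above can be reproduced with scikit-learn directly, since the metric's compute method is a thin wrapper around mean_squared_error (note: the squared= keyword exists in the sklearn versions this code targets; very recent releases moved RMSE to root_mean_squared_error).

from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
print(mean_squared_error(references, predictions))                 # 0.375
print(mean_squared_error(references, predictions, squared=False))  # 0.6123724356957945 (RMSE)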
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = 
outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
12
1
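A hedged usage sketch for the pipeline above; the checkpoint name and image URL are illustrative (OWL-ViT is a model family known to support this task), and running it downloads weights.

from transformers import pipeline

detector = pipeline(
    task="zero-shot-object-detection",
    model="google/owlvit-base-patch32",
)
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,  # forwarded to postprocess(), as _sanitize_parameters above shows
)
# each entry: {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
print(predictions[0])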
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : List[Any] = ReformerTokenizer __lowerCAmelCase : str = ReformerTokenizerFast __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Dict = False __lowerCAmelCase : Tuple = True def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Any = ReformerTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_) tokenizer.save_pretrained(self.tmpdirname) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = """<s>""" lowercase__ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<unk>""") self.assertEqual(vocab_keys[1] , """<s>""") self.assertEqual(vocab_keys[-1] , """j""") self.assertEqual(len(SCREAMING_SNAKE_CASE_) , 10_00) def lowercase__ ( self): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_00) def lowercase__ ( self): '''simple docstring''' if not self.test_rust_tokenizer: return lowercase__ : int = self.get_tokenizer() lowercase__ : Any = self.get_rust_tokenizer() lowercase__ : Optional[Any] = """I was born in 92000, and this is falsé.""" lowercase__ : Tuple = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = self.get_rust_tokenizer() lowercase__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE_) lowercase__ : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=15): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) # Simple input lowercase__ : List[Any] = """This is a simple input""" lowercase__ : int = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase__ : str = ("""This is a simple input""", """This is a pair""") lowercase__ : Optional[int] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Simple input 
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Simple input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""") # Pair input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = ReformerTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.tokenize("""This is a test""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [2_85, 46, 10, 1_70, 3_82] , ) lowercase__ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase__ : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowercase__ ( self): '''simple docstring''' return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""") @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = """Hello World!""" lowercase__ : str = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_)) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowercase__ : List[Any] = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_)) @require_torch @slow def lowercase__ ( self): '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence lowercase__ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys())[:10] lowercase__ : Optional[Any] = """ """.join(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""") lowercase__ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""") lowercase__ : Dict = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) lowercase__ : Tuple = encoded_sequence["""input_ids"""].shape lowercase__ : Union[str, Any] = ReformerModel(SCREAMING_SNAKE_CASE_) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**SCREAMING_SNAKE_CASE_) model(**SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : int = {"""input_ids""": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 lowercase__ : Dict = [ """This is a very simple sentence.""", """The quick brown fox jumps over the lazy dog.""", ] self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=SCREAMING_SNAKE_CASE_ , sequences=SCREAMING_SNAKE_CASE_ , )
12
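A short sketch of the pretrained tokenizer the @slow tests above rely on; running it downloads the sentencepiece model from the Hub.

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")
print(ids)             # [126, 32, 262, 152, 38, 72, 287], matching the test above
print(tok.decode(ids)) # roughly round-trips the input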
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            lowercase__ : str = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ )
        else:
            lowercase__ : List[str] = max(
                mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) ,
                mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] ,
            )
        lowercase__ : List[Any] = val
    return f[i][j]


def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str:
    '''simple docstring'''
    lowercase__ : Any = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                lowercase__ : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                lowercase__ : Tuple = dp[i - 1][w_]

    return dp[n][w_], dp


def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
    '''simple docstring'''
    if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )

    lowercase__ : str = len(lowercase_ )
    if num_items != len(lowercase_ ):
        lowercase__ : Optional[int] = (
            """The number of weights must be the same as the number of values.\n"""
            F'But got {num_items} weights and {len(lowercase_ )} values'
        )
        raise ValueError(lowercase_ )
    for i in range(lowercase_ ):
        if not isinstance(wt[i] , lowercase_ ):
            lowercase__ : int = (
                """All weights must be integers but got weight of """
                F'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(lowercase_ )

    lowercase__ , lowercase__ : Tuple = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
    lowercase__ : set = set()
    _construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )

    return optimal_val, example_optional_set


def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ )
        else:
            optimal_set.add(lowercase_ )
            _construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ )


if __name__ == "__main__":
    lowerCamelCase__ : Dict = [3, 2, 4, 4]
    lowerCamelCase__ : List[Any] = [4, 3, 2, 3]
    lowerCamelCase__ : Optional[int] = 4
    lowerCamelCase__ : Dict = 6
    lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example is items 3 and 4
    lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
12
1
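A hand check of the bottom-up table above on the script's own example (values [3, 2, 4, 4], weights [4, 3, 2, 3], capacity 6): a compact, readably named re-implementation of the same recurrence, offered as a sketch rather than a drop-in replacement.

def knapsack_value(capacity, weights, values, n):
    # dp[i][c] = best value using the first i items with capacity c
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for c in range(1, capacity + 1):
            dp[i][c] = dp[i - 1][c]  # skip item i
            if weights[i - 1] <= c:  # or take it
                dp[i][c] = max(dp[i][c], values[i - 1] + dp[i - 1][c - weights[i - 1]])
    return dp[n][capacity]

# items 3 and 4 (weights 2 and 3, values 4 and 4) fit in capacity 6 for value 8,
# matching the asserts in the __main__ block above
assert knapsack_value(6, [4, 3, 2, 3], [3, 2, 4, 4], 4) == 8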
import math


def UpperCamelCase ( lowercase_ ) -> list[int]:
    '''simple docstring'''
    lowercase__ : Union[str, Any] = []
    lowercase__ : Any = 2
    lowercase__ : Tuple = int(math.sqrt(lowercase_ ) )  # Size of every segment
    lowercase__ : Optional[Any] = [True] * (end + 1)
    lowercase__ : int = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(lowercase_ )
            for i in range(start * start , end + 1 , lowercase_ ):
                lowercase__ : Optional[int] = False
        start += 1
    prime += in_prime

    lowercase__ : str = end + 1
    lowercase__ : Any = min(2 * end , lowercase_ )

    while low <= n:
        lowercase__ : Dict = [True] * (high - low + 1)
        for each in in_prime:
            lowercase__ : Any = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(lowercase_ , high + 1 , lowercase_ ):
                lowercase__ : Optional[int] = False
        for j in range(len(lowercase_ ) ):
            if temp[j] is True:
                prime.append(j + low )
        lowercase__ : Dict = high + 1
        lowercase__ : Optional[int] = min(high + end , lowercase_ )

    return prime


print(sieve(1_0**6))
12
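A quick cross-check of the segmented sieve above against plain trial division on a small range; the sieve function is assumed importable under its un-mangled name sieve.

def is_prime(k: int) -> bool:
    if k < 2:
        return False
    d = 2
    while d * d <= k:
        if k % d == 0:
            return False
        d += 1
    return True

small_primes = [p for p in range(2, 101) if is_prime(p)]
print(small_primes[:10])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# assert sieve(100) == small_primes  # holds for the algorithm as written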
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
12
1
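For reference, a hedged sketch of how the conversion script above is invoked; every path below is a hypothetical placeholder, the script file name is illustrative, and the flag names mirror the argparse definitions at the bottom of the script.

# python convert_flava_checkpoint.py \
#     --checkpoint_path /path/to/flava_checkpoint.pt \
#     --codebook_path /path/to/flava_codebook.pt \
#     --pytorch_dump_folder_path ./flava-converted \
#     --config_path config.json   # optional; a fresh FlavaConfig() is used when omitted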
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)


class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
    __lowerCAmelCase : bool = None
    __lowerCAmelCase : bool = None


class _snake_case ( folder_based_builder.FolderBasedBuilder ):
    __lowerCAmelCase : Optional[Any] = datasets.Audio()
    __lowerCAmelCase : Union[str, Any] = 'audio'
    __lowerCAmelCase : str = AudioFolderConfig
    __lowerCAmelCase : List[str]  # definition at the bottom of the script
    __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )


lowerCamelCase__ : int = [
    """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""",
    """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""",
    """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""",
    """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""",
]
lowerCamelCase__ : int = AUDIO_EXTENSIONS
12
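In practice this builder is consumed through load_dataset("audiofolder", ...); a hedged sketch with a hypothetical directory layout where class labels are inferred from folder names.

from datasets import load_dataset

# my_audio/
#   train/dog/bark1.wav
#   train/cat/meow1.wav
ds = load_dataset("audiofolder", data_dir="my_audio")
print(ds["train"].features)  # {'audio': Audio(...), 'label': ClassLabel(names=['cat', 'dog'])}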
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
12
1
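The shape assertions in the tests above reduce to one contract; a minimal sketch using the same ViTImageProcessor with the tester's 18x18 target size (keyword shapes follow recent transformers releases).

import numpy as np
from transformers import ViTImageProcessor

proc = ViTImageProcessor(
    size={"height": 18, "width": 18},
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
img = np.random.randint(0, 256, (32, 40, 3), dtype=np.uint8)  # arbitrary HxWxC image
pixel_values = proc(img, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]), as the assertions above expect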
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' super().tearDown() gc.collect() def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , ) lowercase__ : Optional[int] = """A painting of a squirrel eating a burger""" lowercase__ : Optional[Any] = jax.device_count() lowercase__ : Optional[int] = num_samples * [prompt] lowercase__ : Tuple = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = shard(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = jax.random.PRNGKey(0) lowercase__ : Tuple = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) lowercase__ : Optional[int] = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) lowercase__ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) lowercase__ : Optional[Any] = images[0, 2_53:2_56, 2_53:2_56, -1] lowercase__ : Dict = jnp.asarray(jax.device_get(image_slice.flatten())) lowercase__ : Dict = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2]) print(f'output_slice: {output_slice}') assert jnp.abs(output_slice - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = """stabilityai/stable-diffusion-2""" lowercase__ , lowercase__ : List[str] = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""") lowercase__ , lowercase__ : int = FlaxStableDiffusionPipeline.from_pretrained( SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , revision="""bf16""" , dtype=jnp.bfloataa , ) lowercase__ : str = scheduler_params lowercase__ : List[Any] = """A painting of a squirrel eating a burger""" lowercase__ : List[Any] = jax.device_count() lowercase__ : str = num_samples * [prompt] lowercase__ : Optional[Any] = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = replicate(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = shard(SCREAMING_SNAKE_CASE_) lowercase__ : str = jax.random.PRNGKey(0) lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) lowercase__ : Tuple = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_)[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) lowercase__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1] lowercase__ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten())) lowercase__ : Union[str, Any] = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7]) print(f'output_slice: {output_slice}') assert jnp.abs(output_slice - 
expected_slice).max() < 1E-2
12
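The two tests above share one inference pattern; trimmed to its core it looks like this (a sketch: it needs a working JAX install, downloads the bf16 weights, and uses plain names in place of the mangled ones).

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
)
prompts = ["A painting of a squirrel eating a burger"] * jax.device_count()
prompt_ids = shard(pipe.prepare_inputs(prompts))
params = replicate(params)
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
images = pipe(prompt_ids, params, rng, num_inference_steps=25, jit=True)[0]
print(images.shape)  # (num_devices, 1, 768, 768, 3), as asserted above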
lowerCamelCase__ : dict[tuple[int, int, int], int] = {}


def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    lowercase__ : Tuple = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 )

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 )

    lowercase__ : List[str] = state_late + state_absent + state_ontime

    lowercase__ : List[Any] = prizestrings
    return prizestrings


def UpperCamelCase ( lowercase_ = 30 ) -> int:
    '''simple docstring'''
    return _calculate(lowercase_ , absent=0 , late=0 )


if __name__ == "__main__":
    print(solution())
12
1
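A brute-force sanity check of the memoised recursion above: Project Euler 191 states that a 4-day period admits 43 prize strings (at most one late day in total, never three consecutive absences).

from itertools import product

def brute(days: int) -> int:
    # O = on time, L = late, A = absent
    return sum(
        1
        for s in product("OLA", repeat=days)
        if "AAA" not in "".join(s) and s.count("L") < 2
    )

print(brute(4))  # 43, matching the problem statement
# assuming the memoised function above is importable as solution:
# assert solution(4) == brute(4)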
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): '''simple docstring''' lowercase__ : Union[str, Any] = parent lowercase__ : Dict = batch_size lowercase__ : Union[str, Any] = seq_length lowercase__ : List[str] = is_training lowercase__ : List[Any] = use_input_mask lowercase__ : str = use_token_type_ids lowercase__ : str = use_labels lowercase__ : Union[str, Any] = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[Any] = intermediate_size lowercase__ : str = hidden_act lowercase__ : Any = hidden_dropout_prob lowercase__ : Optional[Any] = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : int = type_sequence_label_size lowercase__ : List[str] = initializer_range lowercase__ : Optional[Any] = num_labels lowercase__ : List[Any] = num_choices lowercase__ : str = scope def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : int = None if self.use_input_mask: lowercase__ : str = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : str = None if self.use_token_type_ids: lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ : Dict = None lowercase__ : str = None lowercase__ : List[Any] = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ : str = ids_tensor([self.batch_size] , self.num_choices) lowercase__ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self): '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def lowercase__ ( self): '''simple docstring''' ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : int = self.prepare_config_and_inputs() lowercase__ : Dict = True lowercase__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = NezhaModel(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Optional[Any] = True lowercase__ : List[Any] = NezhaModel(SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : str = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Dict = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowercase__ ( self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : int = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : List[str] = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : Any = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = self.num_labels lowercase__ : List[Any] = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = self.num_labels lowercase__ : Optional[int] = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[int] = 
self.num_choices lowercase__ : Union[str, Any] = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE_) model.to(SCREAMING_SNAKE_CASE_) model.eval() lowercase__ : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ : Tuple = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ : Any = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ : Union[str, Any] = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : int = config_and_inputs lowercase__ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : List[str] = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __lowerCAmelCase : List[str] = ( { 'feature-extraction': NezhaModel, 'fill-mask': NezhaForMaskedLM, 'question-answering': NezhaForQuestionAnswering, 'text-classification': NezhaForSequenceClassification, 'token-classification': NezhaForTokenClassification, 'zero-shot': NezhaForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : Optional[Any] = True def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False): '''simple docstring''' lowercase__ : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_) return inputs_dict def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = NezhaModelTester(self) lowercase__ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37) def lowercase__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase__ : str = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Tuple = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE_) self.assertIsNotNone(SCREAMING_SNAKE_CASE_) @slow @require_torch_gpu def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return lowercase__ : Optional[Any] = True lowercase__ : int = model_class(config=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict["""input_ids"""].to("""cpu"""), inputs_dict["""attention_mask"""].to("""cpu"""))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""")) lowercase__ : Tuple = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""") , map_location=SCREAMING_SNAKE_CASE_) loaded(inputs_dict["""input_ids"""].to(SCREAMING_SNAKE_CASE_) , inputs_dict["""attention_mask"""].to(SCREAMING_SNAKE_CASE_)) @require_torch class _snake_case ( unittest.TestCase ): @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""") lowercase__ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]]) lowercase__ : str = torch.tensor([[0, 1, 1, 1, 1, 1]]) with torch.no_grad(): lowercase__ : Any = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)[0] lowercase__ : Any = torch.Size((1, 6, 7_68)) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4)) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""") lowercase__ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]]) lowercase__ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1]]) with torch.no_grad(): lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)[0] lowercase__ : Optional[Any] = torch.Size((1, 6, 2_11_28)) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_) lowercase__ : int = torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4))
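# A minimal standalone sketch (not part of the test suite above) of the forward
# pass the slow integration test exercises; the checkpoint name, the inputs and
# the expected (1, 6, 768) hidden-state shape are taken directly from that test.
import torch
from transformers import NezhaModel

model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
assert last_hidden_state.shape == torch.Size((1, 6, 768))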
import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCamelCase ( ) -> List[Any]: '''simple docstring''' raise RuntimeError("""CUDA out of memory.""" ) class _snake_case ( nn.Module ): def __init__( self): '''simple docstring''' super().__init__() lowercase__ : Optional[Any] = nn.Linear(3 , 4) lowercase__ : Union[str, Any] = nn.BatchNormad(4) lowercase__ : str = nn.Linear(4 , 5) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE_))) class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = [] @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): nonlocal batch_sizes batch_sizes.append(SCREAMING_SNAKE_CASE_) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowercase__ , lowercase__ : int = mock_training_loop_function("""hello""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , [1_28, 64, 32, 16, 8]) self.assertListEqual([bs, arga] , [8, """hello"""]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=0) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=1_28) def mock_training_loop_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function(1_28 , """hello""" , """world""") self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0]) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0]) def lowercase__ ( self): '''simple docstring''' @find_executable_batch_size(starting_batch_size=16) def mock_training_loop_function(SCREAMING_SNAKE_CASE_): raise ValueError("""Oops, we had an error!""") with self.assertRaises(SCREAMING_SNAKE_CASE_) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0]) @require_cuda def lowercase__ ( self): '''simple docstring''' lowercase__ : str = torch.cuda.memory_allocated() lowercase__ : str = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = release_memory(SCREAMING_SNAKE_CASE_) 
self.assertEqual(torch.cuda.memory_allocated() , SCREAMING_SNAKE_CASE_)
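# A minimal usage sketch of the decorator tested above: `find_executable_batch_size`
# re-invokes the wrapped function with the batch size halved each time it raises an
# out-of-memory error, so the body below (a placeholder) ends up running with the
# largest batch size that fits.
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # ... build dataloaders and run the training loop with `batch_size` here ...
    return batch_size

working_batch_size = train()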
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase__ : Union[str, Any] = 1_6 lowerCamelCase__ : List[Any] = 3_2 def UpperCamelCase ( lowercase_ , lowercase_ = 16 ) -> List[str]: '''simple docstring''' lowercase__ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase__ : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase_ ): # max_length=None => use the model max length (it's actually the default) lowercase__ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : List[str] = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Dict = 16 elif accelerator.mixed_precision != "no": lowercase__ : str = 8 else: lowercase__ : Dict = None return tokenizer.pad( lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase__ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) lowercase__ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase__ : str = mocked_dataloaders # noqa: F811 def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1": lowercase__ : List[str] = 2 # New Code # lowercase__ : List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator lowercase__ : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Dict = config["""lr"""] lowercase__ : Dict = int(config["""num_epochs"""] ) lowercase__ : int = int(config["""seed"""] ) lowercase__ : int = int(config["""batch_size"""] ) lowercase__ : str = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase_ ) lowercase__ , lowercase__ : Dict = get_dataloaders(lowercase_ , lowercase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : str = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : Any = AdamW(params=model.parameters() , lr=lowercase_ ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowercase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Now we train the model for epoch in range(lowercase_ ): model.train() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase_ ): lowercase__ : List[str] = model(**lowercase_ ) lowercase__ : Any = output.loss accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : List[str] = model(**lowercase_ ) lowercase__ : Optional[Any] = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase_ , references=lowercase_ , ) lowercase__ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase_ ) def UpperCamelCase ( ) -> str: '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowercase__ : str = parser.parse_args() lowercase__ : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
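# Condensed sketch of the accumulation pattern used in the training loop above:
# inside `accelerator.accumulate(model)`, gradient synchronization and the
# effective optimizer/scheduler step only take place every
# `gradient_accumulation_steps` batches. `accelerator`, `model`, `optimizer`,
# `lr_scheduler` and `train_dataloader` are assumed to be the objects returned
# by `accelerator.prepare` in the script.
for batch in train_dataloader:
    with accelerator.accumulate(model):
        loss = model(**batch).loss
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()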
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = 
name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 
10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
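# Example invocation of the conversion script above (the script filename is an
# assumption; the flags and the default checkpoint URL come from its argument
# parser):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub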
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Dict: '''simple docstring''' assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowercase__ : List[str] = nn.Parameter(lowercase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowercase__ : List[Any] = nn.Parameter(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : int = np.asarray(weights[0] ) lowercase__ : Tuple = np.asarray(weights[1] ) lowercase__ : List[Any] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowercase_ ).view(-1 , lowercase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : Optional[Any] = np.asarray(weights[0] ) lowercase__ : Optional[int] = np.asarray(weights[1] ) lowercase__ : Union[str, Any] = np.asarray(weights[2] ) lowercase__ : Any = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowercase_ ).view(-1 , lowercase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Any = weights[0][0][0] lowercase__ : Tuple = np.asarray(layer_norm_a[0] ) lowercase__ : Any = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , ) # lsh weights + output lowercase__ : List[Any] = weights[0][1] if len(lowercase_ ) < 4: set_layer_weights_in_torch_lsh(lowercase_ , torch_block.attention , lowercase_ ) else: set_layer_weights_in_torch_local(lowercase_ , torch_block.attention , lowercase_ ) # intermediate weighs lowercase__ : Any = weights[2][0][1][2] # Chunked Feed Forward if len(lowercase_ ) == 4: lowercase__ : Union[str, Any] = intermediate_weights[2] # layernorm 2 lowercase__ : Tuple = np.asarray(intermediate_weights[0][0] ) lowercase__ : int = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , ) # intermediate dense lowercase__ : Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowercase__ : Any = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , ) # intermediate out lowercase__ : List[Any] = np.asarray(intermediate_weights[4][0] ) lowercase__ : Optional[Any] = np.asarray(intermediate_weights[4][1] ) set_param( 
torch_block.feed_forward.output.dense , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Dict = torch_model.reformer # word embeds lowercase__ : Any = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowercase_ ) , ) if isinstance(weights[3] , lowercase_ ): lowercase__ : List[Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowercase__ : List[str] = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowercase__ : Union[str, Any] = nn.Parameter(torch.tensor(lowercase_ ) ) lowercase__ : Union[str, Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowercase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowercase__ : List[str] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowercase_ , lowercase_ , lowercase_ ) # output layer norm lowercase__ : int = np.asarray(weights[7][0] ) lowercase__ : str = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , ) # output embeddings lowercase__ : List[Any] = np.asarray(weights[9][0] ) lowercase__ : int = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : Union[str, Any] = ReformerConfig.from_json_file(lowercase_ ) print(F'Building PyTorch model from configuration: {config}' ) lowercase__ : Union[str, Any] = ReformerModelWithLMHead(lowercase_ ) with open(lowercase_ , """rb""" ) as f: lowercase__ : Tuple = pickle.load(lowercase_ )["""weights"""] set_model_weights_in_torch(lowercase_ , lowercase_ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained Reformer model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCamelCase__ : int = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
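# Example invocation of the Reformer conversion script above (the script
# filename and the paths are placeholders; the three required flags come from
# its argument parser):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/trax_weights.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin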
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": 
jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir 
is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... 
""") self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
import argparse import os import re lowerCamelCase__ : str = """src/diffusers""" # Pattern that looks at the indentation in a line. lowerCamelCase__ : str = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCamelCase__ : Optional[Any] = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCamelCase__ : str = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCamelCase__ : Any = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCamelCase__ : Union[str, Any] = re.compile(R"""\[([^\]]+)\]""") def UpperCamelCase ( lowercase_ ) -> int: '''simple docstring''' lowercase__ : Optional[Any] = _re_indent.search(lowercase_ ) return "" if search is None else search.groups()[0] def UpperCamelCase ( lowercase_ , lowercase_="" , lowercase_=None , lowercase_=None ) -> Optional[int]: '''simple docstring''' lowercase__ : int = 0 lowercase__ : List[Any] = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(lowercase_ ): index += 1 lowercase__ : Dict = ["""\n""".join(lines[:index] )] else: lowercase__ : Dict = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowercase__ : str = [lines[index]] index += 1 while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(lowercase_ ) ) if index < len(lowercase_ ) - 1: lowercase__ : Union[str, Any] = [lines[index + 1]] index += 1 else: lowercase__ : Union[str, Any] = [] else: blocks.append("""\n""".join(lowercase_ ) ) lowercase__ : List[Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(lowercase_ ) > 0: blocks.append("""\n""".join(lowercase_ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lowercase_ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def UpperCamelCase ( lowercase_ ) -> List[Any]: '''simple docstring''' def _inner(lowercase_ ): return key(lowercase_ ).lower().replace("""_""" , """""" ) return _inner def UpperCamelCase ( lowercase_ , lowercase_=None ) -> str: '''simple docstring''' def noop(lowercase_ ): return x if key is None: lowercase__ : str = noop # Constants are all uppercase, they go first. lowercase__ : List[Any] = [obj for obj in objects if key(lowercase_ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowercase__ : List[str] = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()] # Functions begin with a lowercase, they go last. 
lowercase__ : Tuple = [obj for obj in objects if not key(lowercase_ )[0].isupper()] lowercase__ : Optional[Any] = ignore_underscore(lowercase_ ) return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) def UpperCamelCase ( lowercase_ ) -> Dict: '''simple docstring''' def _replace(lowercase_ ): lowercase__ : int = match.groups()[0] if "," not in imports: return F'[{imports}]' lowercase__ : Tuple = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowercase__ : str = keys[:-1] return "[" + ", ".join([F'"{k}"' for k in sort_objects(lowercase_ )] ) + "]" lowercase__ : Optional[int] = import_statement.split("""\n""" ) if len(lowercase_ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowercase__ : List[str] = 2 if lines[1].strip() == """[""" else 1 lowercase__ : Union[str, Any] = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowercase__ : Any = sort_objects(lowercase_ , key=lambda lowercase_ : x[1] ) lowercase__ : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(lowercase_ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowercase__ : Any = _re_bracket_content.sub(_replace , lines[1] ) else: lowercase__ : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowercase__ : List[str] = keys[:-1] lowercase__ : List[str] = get_indent(lines[1] ) + """, """.join([F'"{k}"' for k in sort_objects(lowercase_ )] ) return "\n".join(lowercase_ ) else: # Finally we have to deal with imports fitting on one line lowercase__ : str = _re_bracket_content.sub(_replace , lowercase_ ) return import_statement def UpperCamelCase ( lowercase_ , lowercase_=True ) -> int: '''simple docstring''' with open(lowercase_ , """r""" ) as f: lowercase__ : List[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowercase__ : Optional[Any] = split_code_in_indented_blocks( lowercase_ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(lowercase_ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowercase__ : Any = main_blocks[block_idx] lowercase__ : Union[str, Any] = block.split("""\n""" ) # Get to the start of the imports. lowercase__ : int = 0 while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowercase__ : List[str] = len(lowercase_ ) else: line_idx += 1 if line_idx >= len(lowercase_ ): continue # Ignore beginning and last line: they don't contain anything. lowercase__ : List[Any] = """\n""".join(block_lines[line_idx:-1] ) lowercase__ : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. 
lowercase__ : Union[str, Any] = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ ) # We have two categories of import key: list or _import_structure[key].append/extend lowercase__ : Tuple = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowercase__ : Dict = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowercase__ : Any = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None] lowercase__ : List[str] = [x[0] for x in sorted(lowercase_ , key=lambda lowercase_ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowercase__ : Any = 0 lowercase__ : Optional[int] = [] for i in range(len(lowercase_ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowercase__ : List[str] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(lowercase_ ) count += 1 # And we put our main block back together with its first and last line. lowercase__ : List[Any] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(lowercase_ ): if check_only: return True else: print(F'Overwriting {file}.' ) with open(lowercase_ , """w""" ) as f: f.write("""\n""".join(lowercase_ ) ) def UpperCamelCase ( lowercase_=True ) -> int: '''simple docstring''' lowercase__ : Optional[Any] = [] for root, _, files in os.walk(lowercase_ ): if "__init__.py" in files: lowercase__ : int = sort_imports(os.path.join(lowercase_ , """__init__.py""" ) , check_only=lowercase_ ) if result: lowercase__ : Dict = [os.path.join(lowercase_ , """__init__.py""" )] if len(lowercase_ ) > 0: raise ValueError(F'Would overwrite {len(lowercase_ )} files, run `make style`.' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCamelCase__ : str = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
12
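A standalone sketch of the ordering rule the sorter above implements: constants first, then classes, then functions, each group compared case-insensitively with underscores stripped. The sample names are invented.

def ignore_underscore(key):
    return lambda obj: key(obj).lower().replace("_", "")


def sort_objects(objects, key=None):
    if key is None:
        key = lambda x: x  # identity key
    constants = [obj for obj in objects if key(obj).isupper()]
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)


print(sort_objects(["my_func", "MY_CONST", "MyClass", "a_func"]))
# -> ['MY_CONST', 'MyClass', 'a_func', 'my_func']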
lowerCamelCase__ : List[str] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase__ : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
12
1
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
12
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], 
max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : 
Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning 
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
12
1
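Both cache checks above construct position ids with the same broadcast; a runnable sketch of just that step, with dummy shapes.

import jax.numpy as jnp

input_ids = jnp.ones((2, 5), dtype="i4")  # (batch, seq) dummy token ids
# one row of positions 0..seq-2 per batch entry, for the all-but-last-token pass
position_ids = jnp.broadcast_to(
    jnp.arange(input_ids.shape[-1] - 1)[None, :],
    (input_ids.shape[0], input_ids.shape[-1] - 1),
)
print(position_ids.shape)  # (2, 4)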
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
12
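Quick checks for the fixed `mean` above; the empty-list case raises as documented.

print(mean([3, 6, 9]))   # 6.0
print(mean([-1, 0, 1]))  # 0.0
# mean([]) -> ValueError: List is empty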
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['image_processor', 'tokenizer'] __lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor' __lowerCAmelCase : int = 'AutoTokenizer' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: lowercase__ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if images is not None: lowercase__ : Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) if text is not None and images is not None: lowercase__ : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_) , tensor_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
12
1
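A hedged usage sketch for a processor of this shape. The two checkpoints are real public models used only for illustration; the processor construction stays commented out because the class above is shown under a placeholder name.

import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# processor = <the processor class defined above>(image_processor, tokenizer)
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# batch = processor(text=["a photo"], images=[image], return_tensors="pt")
# -> keys: input_ids, attention_mask, pixel_values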
def max_subarray_product(numbers) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
12
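Quick checks for the fixed `max_subarray_product` above.

print(max_subarray_product([2, 3, -2, 4]))  # 6   (subarray [2, 3])
print(max_subarray_product([-2, 0, -1]))    # 0
print(max_subarray_product([-2, -3, 4]))    # 24  (the whole array)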
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (keeping the original guard of 0 for n == 1 or non-int input)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
12
1
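Quick checks for the corrected helpers above; 4782 is the well-known Project Euler 25 answer.

print(fibonacci(12))              # 144, the first Fibonacci number with 3 digits
print(fibonacci_digits_index(3))  # 12
print(solution(1000))             # 4782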
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate apparent power from magnitudes and phase angles (in degrees)."""
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
12
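Example calls for `apparent_power` above; angles are in degrees.

print(apparent_power(100, 5, 0, 0))   # (500+0j)
print(apparent_power(100, 5, 90, 0))  # approximately 500j (tiny real part from floating point)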
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set.""" def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any: '''simple docstring''' lowercase__ : Any = Path(lowercase_ ) path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False lowercase__ : int = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) lowercase__ : Dict = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any = torch.cuda.device_count() lowercase__ : Any = num_gpus lowercase__ : Optional[int] = False if num_gpus > 1: lowercase__ : Tuple = """MULTI_GPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_xpu_available() and use_xpu: lowercase__ : Union[str, Any] = torch.xpu.device_count() lowercase__ : str = num_xpus lowercase__ : List[Any] = False if num_xpus > 1: lowercase__ : str = """MULTI_XPU""" else: lowercase__ : Optional[Any] = """NO""" elif is_npu_available(): lowercase__ : Tuple = torch.npu.device_count() lowercase__ : Union[str, Any] = num_npus lowercase__ : Union[str, Any] = False if num_npus > 1: lowercase__ : List[Any] = """MULTI_NPU""" else: lowercase__ : int = """NO""" else: lowercase__ : Union[str, Any] = 0 lowercase__ : str = True lowercase__ : Union[str, Any] = 1 lowercase__ : int = """NO""" lowercase__ : Tuple = ClusterConfig(**lowercase_ ) config.to_json_file(lowercase_ ) return path def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ ) parser.add_argument( """--config_file""" , default=lowercase_ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=lowercase_ ) return parser def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
12
1
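On a one-GPU machine, `write_basic_config` above serializes roughly the following; a sketch only, since the exact field set comes from the `ClusterConfig` dataclass.

example_single_gpu_config = {
    "compute_environment": "LOCAL_MACHINE",
    "mixed_precision": "no",
    "num_processes": 1,
    "distributed_type": "NO",
    "use_cpu": False,
}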
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCamelCase__ : List[Any] = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def UpperCamelCase ( lowercase_ ) -> Optional[Any]: '''simple docstring''' config.addinivalue_line( """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" ) config.addinivalue_line( """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" ) config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" ) config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" ) config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" ) config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" ) def UpperCamelCase ( lowercase_ ) -> Dict: '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowercase_ ) def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main lowercase__ : List[str] = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(lowercase_ , id=lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' if exitstatus == 5: lowercase__ : Optional[Any] = 0 # Doctest custom flag to ignore output. lowerCamelCase__ : Dict = doctest.register_optionflag("""IGNORE_RESULT""") lowerCamelCase__ : List[str] = doctest.OutputChecker class _snake_case ( UpperCAmelCase_ ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowerCamelCase__ : Optional[Any] = CustomOutputChecker lowerCamelCase__ : Tuple = HfDoctestModule lowerCamelCase__ : Union[str, Any] = HfDocTestParser
12
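With the flag registered above, a doctest can opt out of output comparison via a directive; a small hypothetical example.

def now_ms():
    """
    >>> now_ms()  # doctest: +IGNORE_RESULT
    1690000000000
    """
    import time

    return int(time.time() * 1000)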
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : List[Any] = logging.get_logger(__name__) lowerCamelCase__ : Union[str, Any] = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Union[str, Any] = 'convbert' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) lowercase__ : Dict = vocab_size lowercase__ : List[Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : Tuple = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Tuple = max_position_embeddings lowercase__ : Dict = type_vocab_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Dict = layer_norm_eps lowercase__ : Tuple = embedding_size lowercase__ : List[str] = head_ratio lowercase__ : Dict = conv_kernel_size lowercase__ : Dict = num_groups lowercase__ : int = classifier_dropout class _snake_case ( UpperCAmelCase_ ): @property def lowercase__ ( self): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ : str = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ])
12
1
def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
12
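Quick checks for the fixed `mean_absolute_deviation` above.

print(mean_absolute_deviation([1, 2, 3, 4]))  # 1.0 (average 2.5; deviations 1.5, 0.5, 0.5, 1.5)
print(mean_absolute_deviation([5, 5, 5]))     # 0.0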
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__) class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ): __lowerCAmelCase : bool = None __lowerCAmelCase : bool = None class _snake_case ( folder_based_builder.FolderBasedBuilder ): __lowerCAmelCase : Optional[Any] = datasets.Audio() __lowerCAmelCase : Union[str, Any] = 'audio' __lowerCAmelCase : str = AudioFolderConfig __lowerCAmelCase : List[str] # definition at the bottom of the script __lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' ) lowerCamelCase__ : int = [ """.aiff""", """.au""", """.avr""", """.caf""", """.flac""", """.htk""", """.svx""", """.mat4""", """.mat5""", """.mpc2k""", """.ogg""", """.paf""", """.pvf""", """.raw""", """.rf64""", """.sd2""", """.sds""", """.ircam""", """.voc""", """.w64""", """.wav""", """.nist""", """.wavex""", """.wve""", """.xi""", """.mp3""", """.opus""", ] lowerCamelCase__ : int = AUDIO_EXTENSIONS
12
1
from ...configuration_utils import PretrainedConfig lowerCamelCase__ : List[Any] = { """google/tapas-base-finetuned-sqa""": ( """https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json""" ), """google/tapas-base-finetuned-wtq""": ( """https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json""" ), """google/tapas-base-finetuned-wikisql-supervised""": ( """https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json""" ), """google/tapas-base-finetuned-tabfact""": ( """https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json""" ), } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : List[str] = 'tapas' def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1_0.0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="ratio" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowercase__ : Tuple = vocab_size lowercase__ : str = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Any = num_attention_heads lowercase__ : List[Any] = hidden_act lowercase__ : Optional[Any] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_sizes lowercase__ : Tuple = initializer_range lowercase__ : Optional[Any] = layer_norm_eps # Fine-tuning task hyperparameters lowercase__ : Union[str, Any] = positive_label_weight lowercase__ : Optional[Any] = num_aggregation_labels lowercase__ : str = aggregation_loss_weight lowercase__ : str = use_answer_as_supervision lowercase__ : Union[str, Any] = answer_loss_importance lowercase__ : Union[str, Any] = use_normalized_answer_loss lowercase__ : str = huber_loss_delta lowercase__ : Tuple = temperature lowercase__ : Any = aggregation_temperature lowercase__ : List[Any] = use_gumbel_for_cells lowercase__ : List[str] = use_gumbel_for_aggregation lowercase__ : Dict = average_approximation_function lowercase__ : str = cell_selection_preference lowercase__ : List[Any] = answer_loss_cutoff lowercase__ : Optional[Any] = max_num_rows lowercase__ : str = max_num_columns lowercase__ : Optional[Any] = average_logits_per_cell lowercase__ : Tuple = select_one_column lowercase__ : str = allow_empty_column_selection lowercase__ : Optional[Any] = 
init_cell_selection_weights_to_zero lowercase__ : Dict = reset_position_index_per_cell lowercase__ : str = disable_per_token_loss # Aggregation hyperparameters lowercase__ : Union[str, Any] = aggregation_labels lowercase__ : Optional[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , SCREAMING_SNAKE_CASE_): lowercase__ : Any = {int(SCREAMING_SNAKE_CASE_): v for k, v in aggregation_labels.items()}
12
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = (DDPMScheduler,) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = { """num_train_timesteps""": 10_00, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**SCREAMING_SNAKE_CASE_) return config def lowercase__ ( self): '''simple docstring''' for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : Union[str, Any] = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5 def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = len(SCREAMING_SNAKE_CASE_) lowercase__ : Any = self.dummy_model() lowercase__ : List[Any] = self.dummy_sample_deter lowercase__ : str = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. 
predict previous mean of sample x_t-1 lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : str = pred_prev_sample lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2 assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""") lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter lowercase__ : int = torch.manual_seed(0) for t in reversed(range(SCREAMING_SNAKE_CASE_)): # 1. predict noise residual lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # 2. predict previous mean of sample x_t-1 lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ : Tuple = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_)) lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_)) assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2 assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3 def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE_): if i == len(SCREAMING_SNAKE_CASE_) - 1: lowercase__ : Optional[int] = -1 else: lowercase__ : Tuple = timesteps[i + 1] lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_) lowercase__ : int = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [1_00, 87, 50, 51, 0] with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : int = [1_00, 87, 50, 1, 0] lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_) with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple 
docstring''' lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : int = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_) lowercase__ : str = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
12
1
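The full-loop tests above follow the standard DDPM recipe (predict noise, step the scheduler backwards); a minimal runnable sketch using a zero tensor as a stand-in for the model's noise prediction.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in reversed(range(len(scheduler))):
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])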
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
12
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Any = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = do_rescale lowercase__ : List[Any] = rescale_factor lowercase__ : Tuple = do_pad lowercase__ : Optional[Any] = pad_size def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height lowercase__ : str = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_) if not valid_images(SCREAMING_SNAKE_CASE_): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""") if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""") # All transformations expect numpy arrays. lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images] if do_pad: lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images] lowercase__ : Dict = {"""pixel_values""": images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
12
1
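The padding helper above always rounds up to the next multiple of `size`, even when a side is already aligned; a quick numeric check of the formula.

old_height, old_width, size = 20, 32, 8
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
print(pad_height, pad_width)  # 4 8 -> padded image is 24 x 40 (the aligned side still grows by a full block)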
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests

open = open  # noqa: we just need to have a builtin inside this module to test it properly
12
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu lowerCamelCase__ : Optional[int] = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[Any] = True while ask_again: lowercase__ : Tuple = input(lowercase_ ) try: if default is not None and len(lowercase_ ) == 0: return default return convert_value(lowercase_ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ ) lowercase__ : Any = menu.run(default_choice=lowercase_ ) return convert_value(lowercase_ ) if convert_value is not None else result def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = int(lowercase_ ) return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[str] = int(lowercase_ ) return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] ) def UpperCamelCase ( lowercase_ ) -> str: '''simple docstring''' lowercase__ : str = int(lowercase_ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' lowercase__ : List[Any] = int(lowercase_ ) return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] ) def UpperCamelCase ( lowercase_ ) -> Optional[int]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _snake_case ( argparse.RawDescriptionHelpFormatter ): def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""") return usage
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean lowerCamelCase__ : Optional[int] = 0 lowerCamelCase__ : Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right lowerCamelCase__ : Any = tuple[int, int] class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : List[str] = pos_x lowercase__ : Dict = pos_y lowercase__ : Any = (pos_y, pos_x) lowercase__ : str = goal_x lowercase__ : Optional[int] = goal_y lowercase__ : Dict = g_cost lowercase__ : List[str] = parent lowercase__ : Any = self.calculate_heuristic() lowercase__ : str = self.g_cost + self.h_cost def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.pos_x - self.goal_x lowercase__ : Optional[Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(SCREAMING_SNAKE_CASE_) + abs(SCREAMING_SNAKE_CASE_) else: return sqrt(dy**2 + dx**2) def __lt__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' return self.f_cost < other.f_cost class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = [self.start] lowercase__ : list[Node] = [] lowercase__ : Dict = False def lowercase__ ( self): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowercase__ : List[str] = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(SCREAMING_SNAKE_CASE_) self.closed_nodes.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = self.get_successors(SCREAMING_SNAKE_CASE_) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE_) else: # retrieve the best current path lowercase__ : List[Any] = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE_) else: self.open_nodes.append(SCREAMING_SNAKE_CASE_) return [self.start.pos] def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = [] for action in delta: lowercase__ : Any = parent.pos_x + action[1] lowercase__ : Tuple = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE_ , )) return successors def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[Any] = node lowercase__ : Optional[Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) lowercase__ : List[Any] = current_node.parent path.reverse() return path class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): 
'''simple docstring''' lowercase__ : List[str] = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = False def lowercase__ ( self): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowercase__ : List[Any] = self.fwd_astar.open_nodes.pop(0) lowercase__ : Optional[int] = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_) self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_) lowercase__ : str = current_bwd_node lowercase__ : str = current_fwd_node lowercase__ : Union[str, Any] = { self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE_), self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE_), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(SCREAMING_SNAKE_CASE_) else: # retrieve the best current path lowercase__ : Optional[Any] = astar.open_nodes.pop( astar.open_nodes.index(SCREAMING_SNAKE_CASE_)) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(SCREAMING_SNAKE_CASE_) else: astar.open_nodes.append(SCREAMING_SNAKE_CASE_) return [self.fwd_astar.start.pos] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE_) lowercase__ : str = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE_) bwd_path.pop() bwd_path.reverse() lowercase__ : List[str] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] lowerCamelCase__ : str = (0, 0) lowerCamelCase__ : Dict = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase__ : int = time.time() lowerCamelCase__ : List[Any] = AStar(init, goal) lowerCamelCase__ : str = a_star.search() lowerCamelCase__ : Any = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') lowerCamelCase__ : List[str] = time.time() lowerCamelCase__ : Tuple = BidirectionalAStar(init, goal) lowerCamelCase__ : List[str] = bidir_astar.search() lowerCamelCase__ : List[str] = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
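A standalone illustration of the two heuristics selectable via the HEURISTIC flag above (a sketch; values checked by hand):

from math import sqrt

def manhattan(dx: int, dy: int) -> float:
    return abs(dx) + abs(dy)

def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx**2 + dy**2)

# For a diagonal offset the Manhattan estimate is larger (2 vs ~1.414), so
# HEURISTIC == 1 biases the search toward axis-aligned detours.
assert manhattan(1, 1) == 2
assert abs(euclidean(1, 1) - sqrt(2)) < 1e-12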
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : Tuple = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[int] = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
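For reference, an example invocation of the converter above, given as a comment because the script filename and checkpoint paths are placeholders; the flags come from the argparse setup shown:

# python convert_flava_checkpoint.py \
#     --checkpoint_path ./flava_original.pt \
#     --codebook_path ./flava_codebook.pt \
#     --pytorch_dump_folder_path ./flava-hf \
#     --config_path ./config.json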
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class _snake_case ( UpperCAmelCase_ ): def __init__( self): '''simple docstring''' lowercase__ : List[Any] = [] def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_init_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_train_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_epoch_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_begin""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_step_end""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_evaluate""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_predict""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_save""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_log""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' self.events.append("""on_prediction_step""") @require_torch class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.output_dir) def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_) lowercase__ : Any = 
RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_) lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_) return Trainer( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_)) # Order doesn't matter lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__) for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__) elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_) else: self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : int = ["""on_init_end""", """on_train_begin"""] lowercase__ : Union[str, Any] = 0 lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader()) lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs): expected_events.append("""on_epoch_begin""") for _ in range(SCREAMING_SNAKE_CASE_): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""") if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""") expected_events.append("""on_epoch_end""") if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowercase__ ( self): '''simple docstring''' lowercase__ : int = self.get_trainer() lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # Callbacks passed at init are added to the default callbacks lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ 
( self): '''simple docstring''' lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) # We can also add, pop, or remove by instance lowercase__ : Union[str, Any] = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.remove(SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) lowercase__ : str = self.get_trainer() lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) trainer.add_callback(SCREAMING_SNAKE_CASE_) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # Independent log/save/eval lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5) trainer.train() lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5) trainer.train() lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""") trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""") trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) 
trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_)) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""") as warn_mock: lowercase__ : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
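A minimal custom callback of the kind these tests exercise (a sketch; the hook signature follows transformers.TrainerCallback):

from transformers import TrainerCallback

class PrintEveryHundredSteps(TrainerCallback):
    # on_step_end receives the TrainingArguments, TrainerState and
    # TrainerControl objects, plus extra kwargs (model, dataloaders, ...)
    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step % 100 == 0:
            print(f"reached step {state.global_step}")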
from __future__ import annotations def UpperCamelCase ( lowercase_ , lowercase_ = None ) -> list[list[str]]: '''simple docstring''' lowercase__ : Dict = word_bank or [] # create a table lowercase__ : int = len(lowercase_ ) + 1 lowercase__ : list[list[list[str]]] = [] for _ in range(lowercase_ ): table.append([] ) # seed value lowercase__ : Dict = [[]] # because empty string has empty combination # iterate through the indices for i in range(lowercase_ ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(lowercase_ )] == word: lowercase__ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(lowercase_ )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(lowercase_ )]: combination.reverse() return table[len(lowercase_ )] if __name__ == "__main__": print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""])) print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""])) print( all_construct( """hexagonosaurus""", ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""], ) )
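A hand-checked reference for one small case, written independently of the table-based version above (a sketch using top-down memoization rather than the bottom-up table):

from functools import lru_cache

def all_construct_ref(target, words):
    @lru_cache(maxsize=None)
    def ways(rest):
        if not rest:
            return ((),)  # one way to build the empty string: use no words
        found = []
        for word in words:
            if rest.startswith(word):
                found.extend((word, *way) for way in ways(rest[len(word):]))
        return tuple(found)
    return [list(way) for way in ways(target)]

assert sorted(all_construct_ref("ab", ("a", "b", "ab"))) == [["a", "b"], ["ab"]]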
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = RoCBertTokenizer __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = False __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = filter_non_english def lowercase__ ( self): '''simple docstring''' super().setUp() lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] lowercase__ : Dict = {} lowercase__ : Tuple = {} for i, value in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = i lowercase__ : Any = i lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""]) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""") self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8]) def lowercase__ ( self): '''simple docstring''' lowercase__ : int = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""") , ["""hällo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""]) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""]) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowercase__ : Optional[int] = {} for i, token in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = i lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""") self.assertListEqual(tokenizer.tokenize("""""") , []) self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""]) self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""]) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_whitespace(""" """)) self.assertTrue(_is_whitespace("""\t""")) self.assertTrue(_is_whitespace("""\r""")) self.assertTrue(_is_whitespace("""\n""")) self.assertTrue(_is_whitespace("""\u00A0""")) self.assertFalse(_is_whitespace("""A""")) self.assertFalse(_is_whitespace("""-""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_control("""\u0005""")) self.assertFalse(_is_control("""A""")) self.assertFalse(_is_control(""" """)) self.assertFalse(_is_control("""\t""")) self.assertFalse(_is_control("""\r""")) def lowercase__ ( self): '''simple docstring''' self.assertTrue(_is_punctuation("""-""")) self.assertTrue(_is_punctuation("""$""")) self.assertTrue(_is_punctuation("""`""")) self.assertTrue(_is_punctuation(""".""")) self.assertFalse(_is_punctuation("""A""")) self.assertFalse(_is_punctuation(""" """)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) if self.test_rust_tokenizer: lowercase__ : int = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]]) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
lowercase__ : List[str] = tokenizer_r.encode_plus( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False lowercase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""])) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = ["""的""", """人""", """有"""] lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Union[str, Any] = True lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = False lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_) # it is expected that only the first Chinese character is not preceded by "##". 
lowercase__ : Any = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_) ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file) lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}'): lowercase__ : Optional[int] = """你好,你是谁""" lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = tokenizer.prepare_for_model( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __lowerCAmelCase : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} ) __lowerCAmelCase : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} ) __lowerCAmelCase : ClassVar[Features] = Features( { 'answers': Sequence( { 'text': Value('string' ), 'answer_start': Value('int32' ), } ) } ) __lowerCAmelCase : str = "question" __lowerCAmelCase : str = "context" __lowerCAmelCase : str = "answers" @property def lowercase__ ( self): '''simple docstring''' return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = 
outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def UpperCamelCase ( lowercase_ , lowercase_ ) -> Dict: '''simple docstring''' lowercase__ : List[str] = k_size // 2 lowercase__ , lowercase__ : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowercase__ : Optional[int] = 1 / (2 * pi * sigma) * exp(-(square(lowercase_ ) + square(lowercase_ )) / (2 * square(lowercase_ )) ) return g def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' lowercase__ , lowercase__ : List[Any] = image.shape[0], image.shape[1] # dst image height and width lowercase__ : Tuple = height - k_size + 1 lowercase__ : Dict = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowercase__ : List[str] = zeros((dst_height * dst_width, k_size * k_size) ) lowercase__ : Optional[int] = 0 for i, j in product(range(lowercase_ ) , range(lowercase_ ) ): lowercase__ : Any = ravel(image[i : i + k_size, j : j + k_size] ) lowercase__ : List[Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowercase__ : Dict = gen_gaussian_kernel(lowercase_ , lowercase_ ) lowercase__ : Tuple = ravel(lowercase_ ) # reshape and get the dst image lowercase__ : Any = dot(lowercase_ , lowercase_ ).reshape(lowercase_ , lowercase_ ).astype(lowercase_ ) return dst if __name__ == "__main__": # read original image lowerCamelCase__ : Any = imread(R"""../image_data/lena.jpg""") # turn image in gray scale value lowerCamelCase__ : Dict = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size lowerCamelCase__ : List[Any] = gaussian_filter(gray, 3, sigma=1) lowerCamelCase__ : List[Any] = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
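A standalone check of the kernel built by the first function above (a sketch with readable names; note that sampling the continuous Gaussian on a grid does not sum exactly to 1, so explicit normalization is common):

import numpy as np

def gen_gaussian_kernel(k_size, sigma):
    # same construction as above: centered grid, then the 2-D Gaussian formula
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))

kernel = gen_gaussian_kernel(3, sigma=1)
print(kernel.sum())            # close to, but not exactly, 1
kernel = kernel / kernel.sum() # normalize if a unit-gain blur is wanted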
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase__ : str = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) else: lowercase__ : List[str] = max( mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , ) lowercase__ : List[Any] = val return f[i][j] def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> str: '''simple docstring''' lowercase__ : Any = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase__ : List[Any] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase__ : Tuple = dp[i - 1][w_] return dp[n][w_], dp def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) lowercase__ : str = len(lowercase_ ) if num_items != len(lowercase_ ): lowercase__ : Optional[int] = ( """The number of weights must be the same as the number of values.\n""" F'But got {num_items} weights and {len(lowercase_ )} values' ) raise ValueError(lowercase_ ) for i in range(lowercase_ ): if not isinstance(wt[i] , lowercase_ ): lowercase__ : int = ( """All weights must be integers but got weight of """ F'type {type(wt[i] )} at index {i}' ) raise TypeError(lowercase_ ) lowercase__ , lowercase__ : Tuple = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : set = set() _construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) return optimal_val, example_optional_set def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ ) else: optimal_set.add(lowercase_ ) _construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : Dict = [3, 2, 4, 4] lowerCamelCase__ : List[Any] = [4, 3, 2, 3] lowerCamelCase__ : Optional[int] = 4 lowerCamelCase__ : Dict = 6 lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
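For reference, the recurrence the bottom-up table implements:

# dp[i][w] = dp[i-1][w]                                        if wt[i-1] > w
# dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])  otherwise
# dp[n][w] is then the best value achievable with all n items and capacity w.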
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCAmelCase : List[Any] = 'pixel_values' __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : List[Any] = TimmBackboneConfig def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(self , """timm""") super().__init__(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""") if config.backbone not in timm.list_models(): raise ValueError(f'backbone {config.backbone} is not supported by timm.') if hasattr(SCREAMING_SNAKE_CASE_ , """out_features""") and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""") lowercase__ : Any = getattr(SCREAMING_SNAKE_CASE_ , """use_pretrained_backbone""" , SCREAMING_SNAKE_CASE_) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""") # We just take the final layer by default. This matches the default for the transformers models. lowercase__ : Optional[int] = config.out_indices if getattr(SCREAMING_SNAKE_CASE_ , """out_indices""" , SCREAMING_SNAKE_CASE_) is not None else (-1,) lowercase__ : List[str] = timm.create_model( config.backbone , pretrained=SCREAMING_SNAKE_CASE_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
lowercase__ : List[str] = self._backbone.return_layers lowercase__ : Union[str, Any] = {layer["""module"""]: str(SCREAMING_SNAKE_CASE_) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(SCREAMING_SNAKE_CASE_) @classmethod def lowercase__ ( cls , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' requires_backends(cls , ["""vision""", """timm"""]) from ...models.timm_backbone import TimmBackboneConfig lowercase__ : Dict = kwargs.pop("""config""" , TimmBackboneConfig()) lowercase__ : List[str] = kwargs.pop("""use_timm_backbone""" , SCREAMING_SNAKE_CASE_) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""") lowercase__ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels) lowercase__ : Optional[int] = kwargs.pop("""features_only""" , config.features_only) lowercase__ : int = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone) lowercase__ : Any = kwargs.pop("""out_indices""" , config.out_indices) lowercase__ : List[str] = TimmBackboneConfig( backbone=SCREAMING_SNAKE_CASE_ , num_channels=SCREAMING_SNAKE_CASE_ , features_only=SCREAMING_SNAKE_CASE_ , use_pretrained_backbone=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , ) return super()._from_config(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' pass def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""") if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowercase__ : List[str] = self._all_layers lowercase__ : Optional[int] = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = self._return_layers lowercase__ : Optional[Any] = tuple(hidden_states[i] for i in self.out_indices) else: lowercase__ : int = self._backbone(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = None lowercase__ : Optional[Any] = tuple(SCREAMING_SNAKE_CASE_) lowercase__ : int = tuple(SCREAMING_SNAKE_CASE_) if hidden_states is not None else None if not return_dict: lowercase__ : Union[str, Any] = (feature_maps,) if output_hidden_states: lowercase__ : Optional[Any] = output + (hidden_states,) return output return BackboneOutput(feature_maps=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , attentions=SCREAMING_SNAKE_CASE_)
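A minimal usage sketch (assumes torch and timm are installed; "resnet18" is an arbitrary timm model name used for illustration, and use_pretrained_backbone=False avoids a weight download):

import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(
    backbone="resnet18",
    out_indices=(1, 2, 3, 4),
    use_pretrained_backbone=False,
)
backbone = TimmBackbone(config)
outputs = backbone(torch.randn(1, 3, 224, 224))
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one map per requested stage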
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( lowercase_ ) -> Union[str, Any]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> List[Any]: '''simple docstring''' lowercase__ : int = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase__ : Optional[Any] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase__ : Optional[Any] = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase__ : Optional[int] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase__ : List[Any] = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase__ : int = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase__ : Optional[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase__ : Any = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase__ : Optional[Any] = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase__ : Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase__ : Any = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase__ : List[Any] = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase__ : str = value.float() for key, value in codebook_state_dict.items(): lowercase__ : Any = value return upgrade @torch.no_grad() def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: lowercase__ : int = FlavaConfig.from_pretrained(lowercase_ ) else: lowercase__ : Optional[int] = FlavaConfig() lowercase__ : List[Any] = FlavaForPreTraining(lowercase_ ).eval() lowercase__ : Dict = convert_dalle_checkpoint(lowercase_ , lowercase_ , save_checkpoint=lowercase_ ) if os.path.exists(lowercase_ ): lowercase__ : Dict = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : int = upgrade_state_dict(lowercase_ , lowercase_ ) hf_model.load_state_dict(lowercase_ ) lowercase__ : Optional[int] = hf_model.state_dict() lowercase__ : Optional[int] = count_parameters(lowercase_ ) lowercase__ : Any = count_parameters(lowercase_ ) + count_parameters(lowercase_ ) assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to 
flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ : List[str] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=0 ) -> List[str]: '''simple docstring''' if name is None: lowercase__ : List[Any] = None else: lowercase__ : Any = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" lowercase__ : int = fmt.format(lowercase_ ) # Print and recurse (if needed). 
if isinstance(lowercase_ , lowercase_ ): if msg is not None: print(lowercase_ ) for k in val.keys(): recursive_print(lowercase_ , val[k] , spaces + 2 ) elif isinstance(lowercase_ , torch.Tensor ): print(lowercase_ , """:""" , val.size() ) else: print(lowercase_ , """:""" , lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' lowercase__ : List[Any] = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] lowercase__ : Optional[int] = (num_heads, hidden_size, num_splits) + input_shape[1:] lowercase__ : str = param.view(*lowercase_ ) lowercase__ : Dict = param.transpose(0 , 2 ) lowercase__ : Tuple = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] lowercase__ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] lowercase__ : int = param.view(*lowercase_ ) lowercase__ : List[Any] = param.transpose(0 , 1 ).contiguous() lowercase__ : List[str] = param.view(*lowercase_ ) return param def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Union[str, Any] = {} # old versions did not store training args lowercase__ : Optional[int] = input_state_dict.get("""args""" , lowercase_ ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) lowercase__ : Optional[int] = ds_args.padded_vocab_size lowercase__ : List[str] = ds_args.max_position_embeddings lowercase__ : Union[str, Any] = ds_args.hidden_size lowercase__ : Union[str, Any] = ds_args.num_layers lowercase__ : List[Any] = ds_args.num_attention_heads lowercase__ : Any = ds_args.ffn_hidden_size # pprint(config) # The number of heads. lowercase__ : Tuple = config.n_head # The hidden_size per head. lowercase__ : List[str] = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): lowercase__ : str = input_state_dict["""checkpoint_version"""] else: lowercase__ : Optional[int] = 0.0 # The model. lowercase__ : Tuple = input_state_dict["""model"""] # The language model. lowercase__ : List[str] = model["""language_model"""] # The embeddings. lowercase__ : Dict = lm["""embedding"""] # The word embeddings. lowercase__ : Any = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. lowercase__ : List[Any] = word_embeddings[: config.vocab_size, :] lowercase__ : List[str] = word_embeddings # The position embeddings. lowercase__ : Tuple = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] lowercase__ : List[str] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. lowercase__ : Optional[Any] = pos_embeddings # The transformer. lowercase__ : List[str] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. lowercase__ : str = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. 
lowercase__ : Optional[int] = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. lowercase__ : int = layer_re.match(lowercase_ ) # Stop if that's not a layer if m is None: break # The index of the layer. lowercase__ : Union[str, Any] = int(m.group(1 ) ) # The name of the operation. lowercase__ : Optional[Any] = m.group(2 ) # Is it a weight or a bias? lowercase__ : Optional[int] = m.group(3 ) # The name of the layer. lowercase__ : List[str] = F'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): lowercase__ : Optional[Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2""" lowercase__ : str = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. lowercase__ : List[str] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , lowercase_ , lowercase_ ) lowercase__ : Any = causal_mask # Insert a "dummy" tensor for masked_bias. lowercase__ : Dict = torch.tensor(-1E4 , dtype=torch.floataa ) lowercase__ : Tuple = masked_bias lowercase__ : List[Any] = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. lowercase__ : Optional[int] = out_val.transpose(0 , 1 ).contiguous() # Store. lowercase__ : int = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": lowercase__ : List[str] = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ ) # Store. No change of shape. lowercase__ : Dict = out_val # Transpose the weights. elif weight_or_bias == "weight": lowercase__ : List[str] = megatron_to_transformers[op_name] lowercase__ : int = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": lowercase__ : List[str] = megatron_to_transformers[op_name] lowercase__ : Dict = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. lowercase__ : Union[str, Any] = transformer["""final_layernorm.weight"""] lowercase__ : List[Any] = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. lowercase__ : Union[str, Any] = word_embeddings # It should be done! return output_state_dict def UpperCamelCase ( ) -> Optional[Any]: '''simple docstring''' lowercase__ : List[str] = argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=lowercase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=lowercase_ , help="""An optional config json file describing the pre-trained model.""" , ) lowercase__ : Optional[int] = parser.parse_args() # Extract the basename. lowercase__ : Union[str, Any] = os.path.dirname(args.path_to_checkpoint ) # Load the model. 
# the .zip is very optional, let's keep it for backward compatibility print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: lowercase__ : Union[str, Any] = torch.load(lowercase_ , map_location="""cpu""" ) else: lowercase__ : int = torch.load(args.path_to_checkpoint , map_location="""cpu""" ) lowercase__ : Optional[int] = input_state_dict.get("""args""" , lowercase_ ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: lowercase__ : int = """gelu_fast""" elif ds_args.openai_gelu: lowercase__ : Tuple = """gelu_new""" else: lowercase__ : Union[str, Any] = """gelu""" else: # in the very early days this used to be "gelu_new" lowercase__ : Optional[int] = """gelu_new""" # Spell out all parameters in case the defaults change. lowercase__ : Tuple = GPTaConfig( vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowercase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=lowercase_ , summary_activation=lowercase_ , summary_proj_to_labels=lowercase_ , summary_first_dropout=0.1 , scale_attn_weights=lowercase_ , use_cache=lowercase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , ) else: lowercase__ : List[str] = GPTaConfig.from_json_file(args.config_file ) lowercase__ : Optional[Any] = ["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) lowercase__ : Dict = convert_megatron_checkpoint(lowercase_ , lowercase_ , lowercase_ ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(lowercase_ , lowercase_ ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: lowercase__ : Dict = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": lowercase__ : List[Any] = """gpt2""" elif tokenizer_type == "PretrainedFromHF": lowercase__ : str = ds_args.tokenizer_name_or_path else: raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' ) else: lowercase__ : Union[str, Any] = """gpt2""" lowercase__ : List[Any] = AutoTokenizer.from_pretrained(lowercase_ ) lowercase__ : int = type(lowercase_ ).__name__ lowercase__ : str = tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(lowercase_ ) # Save tokenizer based on args print(F'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(lowercase_ ) # Store the state_dict to file. lowercase__ : Optional[Any] = os.path.join(lowercase_ , """pytorch_model.bin""" ) print(F'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(lowercase_ , lowercase_ ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
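# A toy illustration (not part of the conversion script) of the
# checkpoint_version >= 2.0 branch of fix_query_key_value_ordering above:
# Megatron stores the fused QKV matrix as [num_heads * num_splits * hidden, :],
# and the fix swaps the head and split axes before flattening back.
import torch

num_heads, num_splits, hidden = 2, 3, 4
param = torch.arange(num_heads * num_splits * hidden * 8, dtype=torch.float32).view(
    num_heads * num_splits * hidden, 8
)
reordered = (
    param.view(num_heads, num_splits, hidden, 8)
    .transpose(0, 1)
    .contiguous()
    .view(param.size())
)
print(param.shape, reordered.shape)  # torch.Size([24, 8]) torch.Size([24, 8])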
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = 
prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
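# A standalone sketch of the behaviour these tests exercise (sizes chosen to
# match the tester defaults above): ViTImageProcessor resizes and normalizes
# an input image into a [batch, channels, height, width] tensor.
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])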
from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : str = logging.get_logger(__name__) lowerCamelCase__ : str = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : int = 'autoformer' __lowerCAmelCase : str = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "student_t" , SCREAMING_SNAKE_CASE_ = "nll" , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0_2 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 25 , SCREAMING_SNAKE_CASE_ = 3 , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Any = prediction_length lowercase__ : Dict = context_length if context_length is not None else prediction_length lowercase__ : List[str] = distribution_output lowercase__ : Optional[int] = loss lowercase__ : Tuple = input_size lowercase__ : Any = num_time_features lowercase__ : Optional[Any] = lags_sequence lowercase__ : Dict = scaling lowercase__ : Dict = num_dynamic_real_features lowercase__ : Union[str, Any] = num_static_real_features lowercase__ : Any = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE_) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""") lowercase__ : str = cardinality else: lowercase__ : int = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE_) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""") lowercase__ : List[Any] = embedding_dimension else: lowercase__ : Optional[Any] = [min(50 , (cat + 1) // 2) for cat in self.cardinality] lowercase__ : Optional[Any] = num_parallel_samples # Transformer architecture configuration lowercase__ : int = input_size * len(self.lags_sequence) + self._number_of_features lowercase__ : Tuple = d_model lowercase__ : List[str] = encoder_attention_heads lowercase__ : int = decoder_attention_heads lowercase__ : Any = encoder_ffn_dim lowercase__ : List[Any] = decoder_ffn_dim lowercase__ : Tuple = encoder_layers lowercase__ : Any = decoder_layers lowercase__ : int = dropout lowercase__ : Optional[Any] = attention_dropout lowercase__ : str = activation_dropout lowercase__ : Any = encoder_layerdrop lowercase__ : List[Any] = decoder_layerdrop lowercase__ : str = activation_function lowercase__ 
: Union[str, Any] = init_std lowercase__ : Any = use_cache # Autoformer lowercase__ : Any = label_length lowercase__ : Optional[int] = moving_average lowercase__ : Tuple = autocorrelation_factor super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) @property def lowercase__ ( self): '''simple docstring''' return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
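# A short sketch of the derived sizes above: context_length falls back to
# prediction_length when not given, and feature_size is
# input_size * len(lags_sequence) plus the count from _number_of_features.
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24)
print(config.context_length)  # 24 (defaults to prediction_length)
print(config.feature_size)    # 9 with the defaults above: 1 * 7 lags + 2 loc/scale features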
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are late three days in a row, or absent twice,
    # the string can no longer win a prize
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
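# Quick check of the recurrence above: Project Euler problem 191 gives 43 as
# the number of valid "prize" strings over a 4-day period.
print(solution(4))  # 43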
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = RobertaTokenizer __lowerCAmelCase : Union[str, Any] = RobertaTokenizerFast __lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : int = {'cls_token': '<s>'} def lowercase__ ( self): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ : Any = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowercase__ : Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_)))) lowercase__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase__ : Dict = {"""unk_token""": """<unk>"""} lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_)) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' kwargs.update(self.special_tokens_map) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = """lower newer""" lowercase__ : Dict = """lower newer""" return input_text, output_text def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map) lowercase__ : Union[str, Any] = """lower newer""" lowercase__ : Tuple = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowercase__ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = tokens + [tokenizer.unk_token] lowercase__ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_) , [0, 3_14_14, 2_32, 3_28, 2]) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained("""roberta-base""") lowercase__ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = self.get_tokenizer() lowercase__ : Optional[int] = """Encode this sequence.""" lowercase__ : List[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""")[0]] # Testing encoder arguments lowercase__ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) tokenizer.add_special_tokens({"""bos_token""": """<s>"""}) lowercase__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # Testing spaces after special tokens lowercase__ : int = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_)}) # mask token has a left space lowercase__ : Optional[int] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) lowercase__ : Any = """Encode <mask> sequence""" lowercase__ : Any = """Encode <mask>sequence""" lowercase__ : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = encoded.index(SCREAMING_SNAKE_CASE_) lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_) lowercase__ : str = encoded.index(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : List[Any] = 
self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : int = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : Any = """A, <mask> AllenNLP sentence.""" lowercase__ : Union[str, Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) lowercase__ : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) lowercase__ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2]) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) def lowercase__ ( self): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2): lowercase__ : int = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) lowercase__ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): lowercase__ : Optional[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` lowercase__ : List[str] = f'{text_of_1_token} {text_of_1_token}' lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_) + 1, len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , 
use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_) + 1, len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : str = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_), len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : Dict = f' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase__ : int = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_) + 1, 1 + len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : Dict = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_), 1 + len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , ) lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_), 1 + len(SCREAMING_SNAKE_CASE_) + 1 + len(SCREAMING_SNAKE_CASE_)) , )
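# A standalone sketch of the offset-mapping behaviour the tests above check,
# using the public "roberta-base" checkpoint (downloaded on first use):
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True, trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (6, 11)] -- leading spaces trimmed from the offsets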
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
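# A minimal sketch of find_executable_batch_size outside the test harness: the
# decorator retries the wrapped function with a halved batch size whenever it
# raises a CUDA out-of-memory style error.
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:  # stand-in for a real OOM at large batch sizes
        raise RuntimeError("CUDA out of memory.")
    return batch_size


print(train())  # 16, after trying 128 -> 64 -> 32 -> 16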
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase_ ) class _snake_case ( UpperCAmelCase_ ): def __init__( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_) if self.framework == "tf": raise ValueError(f'The {self.__class__} is only available in PyTorch.') requires_backends(self , """vision""") self.check_model_type(SCREAMING_SNAKE_CASE_) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' if "text_queries" in kwargs: lowercase__ : Any = kwargs.pop("""text_queries""") if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image)): lowercase__ : Optional[Any] = {"""image""": image, """candidate_labels""": candidate_labels} else: lowercase__ : int = image lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) return results def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = {} if "threshold" in kwargs: lowercase__ : List[Any] = kwargs["""threshold"""] if "top_k" in kwargs: lowercase__ : int = kwargs["""top_k"""] return {}, {}, postprocess_params def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = load_image(inputs["""image"""]) lowercase__ : Any = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = candidate_labels.split(""",""") lowercase__ : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) lowercase__ : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = model_inputs.pop("""target_size""") lowercase__ : Optional[int] = model_inputs.pop("""candidate_label""") lowercase__ : Dict = model_inputs.pop("""is_last""") lowercase__ : Union[str, Any] = self.model(**SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : Union[str, Any] = [] for model_output in model_outputs: lowercase__ : Optional[int] = model_output["""candidate_label"""] lowercase__ : Tuple = BaseModelOutput(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""])[0] for index in outputs["scores"].nonzero(): lowercase__ : Optional[Any] = 
outputs["""scores"""][index].item() lowercase__ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0]) lowercase__ : Tuple = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_) lowercase__ : int = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x["score"] , reverse=SCREAMING_SNAKE_CASE_) if top_k: lowercase__ : Any = results[:top_k] return results def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""") lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = box.int().tolist() lowercase__ : Optional[int] = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase ( lowercase_ ) -> Any: '''simple docstring''' lowercase__ : Optional[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ : Optional[int] = 4 lowercase__ : Optional[Any] = 48 lowercase__ : int = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : List[str] = [6, 6, 6, 6] lowercase__ : Any = 60 lowercase__ : Tuple = [6, 6, 6, 6] lowercase__ : Dict = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = 4 lowercase__ : Any = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ : str = 1 lowercase__ : Optional[int] = 1 lowercase__ : Optional[int] = 1_26 lowercase__ : Any = 7 lowercase__ : int = 255.0 lowercase__ : List[Any] = """""" return config def UpperCamelCase ( lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowercase__ : Dict = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowercase__ : List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowercase__ : Optional[int] = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowercase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowercase__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowercase__ : int = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowercase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowercase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowercase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowercase__ : Any = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowercase__ : Optional[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowercase__ : Dict = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowercase__ : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowercase__ : List[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowercase__ : Union[str, Any] = """layernorm.weight""" if name == "norm.bias": lowercase__ : List[str] = """layernorm.bias""" if "conv_first" in name: lowercase__ : Union[str, Any] = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ : List[Any] = name.replace("""conv_last""" , """final_convolution""" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ : Optional[int] = 
name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowercase__ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowercase__ : Optional[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowercase__ : List[str] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowercase__ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowercase__ : int = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowercase__ : str = """swin2sr.""" + name return name def UpperCamelCase ( lowercase_ , lowercase_ ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase__ : str = orig_state_dict.pop(lowercase_ ) if "qkv" in key: lowercase__ : Any = key.split(""".""" ) lowercase__ : List[Any] = int(key_split[1] ) lowercase__ : Dict = int(key_split[4] ) lowercase__ : Optional[Any] = config.embed_dim if "weight" in key: lowercase__ : List[str] = val[:dim, :] lowercase__ : List[str] = val[dim : dim * 2, :] lowercase__ : Optional[Any] = val[-dim:, :] else: lowercase__ : Optional[Any] = val[:dim] lowercase__ : List[Any] = val[dim : dim * 2] lowercase__ : Optional[int] = val[-dim:] pass else: lowercase__ : Optional[Any] = val return orig_state_dict def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Dict = get_config(lowercase_ ) lowercase__ : Any = SwinaSRForImageSuperResolution(lowercase_ ) model.eval() lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowercase_ , map_location="""cpu""" ) lowercase__ : Union[str, Any] = convert_state_dict(lowercase_ , lowercase_ ) lowercase__ , lowercase__ : Dict = model.load_state_dict(lowercase_ , strict=lowercase_ ) if len(lowercase_ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase_ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F'Unexpected key {key} in state_dict' ) # verify values lowercase__ : Any = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowercase__ : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert("""RGB""" ) lowercase__ : Any = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ : Optional[int] = 1_26 if """Jpeg""" in checkpoint_url else 2_56 lowercase__ : Union[str, Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : Dict = transforms(lowercase_ ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ : Any = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ : Union[str, Any] = model(lowercase_ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ : Optional[Any] = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : Optional[Any] = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ : List[str] = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ : Optional[Any] = torch.Size([1, 3, 
10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 5_12, 5_12] ) lowercase__ : int = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ : Tuple = torch.Size([1, 3, 10_24, 10_24] ) lowercase__ : int = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase_ , atol=1E-3 ) print("""Looks ok!""" ) lowercase__ : str = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowercase__ : str = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(lowercase_ ) if push_to_hub: model.push_to_hub(F'caidas/{model_name}' ) processor.push_to_hub(F'caidas/{model_name}' ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") lowerCamelCase__ : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
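# A short usage sketch for a converted checkpoint (the repository name follows
# the `caidas/...` naming used by the push_to_hub calls above); the x2 model
# should double the spatial resolution of its input:
import torch
from transformers import Swin2SRForImageSuperResolution

sr_model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
lowres = torch.rand(1, 3, 64, 64)  # a random tensor stands in for a real low-res image
with torch.no_grad():
    sr_output = sr_model(pixel_values=lowres)
print(sr_output.reconstruction.shape)  # expected: torch.Size([1, 3, 128, 128])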
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry between encoder and decoder depth
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
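# A minimal usage sketch of the config class above (this assumes it mirrors the
# public transformers T5Config, as the archive map and imports suggest):
#
#   config = T5Config(feed_forward_proj="gated-gelu")
#   assert config.is_gated_act
#   assert config.dense_act_fn == "gelu_new"  # backwards-compatibility remap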
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : BigBirdConfig __lowerCAmelCase : jnp.dtype = jnp.floataa __lowerCAmelCase : bool = True def lowercase__ ( self): '''simple docstring''' super().setup() lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype) def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _snake_case ( UpperCAmelCase_ ): __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ): lowercase__ : int = logits.shape[-1] lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" ) lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 ) lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase__ : Optional[int] = reduction(lowercase_ ) return loss lowercase__ : int = partial(lowercase_ , reduction=jnp.mean ) lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _snake_case : __lowerCAmelCase : str = "google/bigbird-roberta-base" __lowerCAmelCase : int = 3_000 __lowerCAmelCase : int = 10_500 __lowerCAmelCase : int = 128 __lowerCAmelCase : int = 3 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = 5 # tx_args __lowerCAmelCase : float = 3e-5 __lowerCAmelCase : float = 0.0 __lowerCAmelCase : int = 20_000 __lowerCAmelCase : float = 0.0_095 __lowerCAmelCase : str = "bigbird-roberta-natural-questions" __lowerCAmelCase : str = "training-expt" __lowerCAmelCase : str = "data/nq-training.jsonl" __lowerCAmelCase : str = "data/nq-validation.jsonl" def lowercase__ ( self): '''simple docstring''' os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_) lowercase__ : Any = os.path.join(self.base_dir , self.save_dir) lowercase__ : str = self.batch_size_per_device * jax.device_count() @dataclass class _snake_case : __lowerCAmelCase : int __lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs def __call__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""]) lowercase__ : str = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """attention_mask""": 
jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa), } return batch def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))] while len(SCREAMING_SNAKE_CASE_) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]: '''simple docstring''' if seed is not None: lowercase__ : Any = dataset.shuffle(seed=lowercase_ ) for i in range(len(lowercase_ ) // batch_size ): lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowercase_ ) @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int: '''simple docstring''' def loss_fn(lowercase_ ): lowercase__ : Dict = model_inputs.pop("""start_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""end_labels""" ) lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Any = outputs return state.loss_fn( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ ) lowercase__ : Tuple = jax.value_and_grad(lowercase_ ) lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params ) lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" ) lowercase__ : str = state.apply_gradients(grads=lowercase_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str: '''simple docstring''' lowercase__ : Tuple = model_inputs.pop("""start_labels""" ) lowercase__ : List[str] = model_inputs.pop("""end_labels""" ) lowercase__ : int = model_inputs.pop("""pooled_labels""" ) lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ ) lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class _snake_case ( train_state.TrainState ): __lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ ) @dataclass class _snake_case : __lowerCAmelCase : Args __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : Callable __lowerCAmelCase : wandb __lowerCAmelCase : Callable = None def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None): '''simple docstring''' lowercase__ : List[str] = model.params lowercase__ : Dict = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir 
is not None: lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Optional[Any] = args lowercase__ : Union[str, Any] = data_collator lowercase__ : str = lr lowercase__ : Union[str, Any] = params lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_) return state def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = self.args lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size lowercase__ : int = jax.random.PRNGKey(0) lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count()) for epoch in range(args.max_epochs): lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa) lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 if i % args.logging_steps == 0: lowercase__ : List[str] = jax_utils.unreplicate(state.step) lowercase__ : str = running_loss.item() / i lowercase__ : Tuple = self.scheduler_fn(state_step - 1) lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_)) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size) lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa) lowercase__ : Optional[Any] = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """): lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_) running_loss += jax_utils.unreplicate(metrics["""loss"""]) i += 1 return running_loss / i def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... 
""") self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""")) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""")) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_) print("""DONE""") def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ ) with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase__ : Optional[Any] = from_bytes(state.params , f.read() ) with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase__ : Dict = from_bytes(state.opt_state , f.read() ) lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) ) lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) ) with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f: lowercase__ : int = json.load(lowercase_ ) lowercase__ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: '''simple docstring''' lowercase__ : Optional[int] = num_train_steps - warmup_steps lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ ) lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ ) lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: '''simple docstring''' def weight_decay_mask(lowercase_ ): lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ ) lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowercase_ ) lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ ) return tx, lr
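# A hedged sketch of assembling the optimizer defined above. The keyword names
# come from the tx_args dict used when the TrainState is created; the values
# are illustrative only. build_tx returns the optax transform and the learning
# rate schedule.
#
#   tx, lr_schedule = build_tx(
#       lr=3e-5, init_lr=0.0, warmup_steps=20_000, num_train_steps=100_000, weight_decay=0.0095
#   )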
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluates the functional correctness of a completion by running the test
    suite provided in the problem."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files). WARNING: this is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
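if __name__ == "__main__":
    # A hedged usage sketch of check_correctness; the toy program below is
    # illustrative only and not part of the original module.
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}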
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
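# A hedged sketch of how the placeholder map above is typically consumed when
# rendering doc snippets; the substitution loop itself is an assumption, not
# part of the source.
#
#   snippet = "processor = {processor_class}.from_pretrained(checkpoint)"
#   for placeholder, fake in black_avoid_patterns.items():
#       snippet = snippet.replace(placeholder, fake)
#   # snippet == "processor = FakeProcessorClass.from_pretrained(checkpoint)"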
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
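# A hedged sketch of building the model configured above directly; the
# diffusers Flax constructor kwargs and init_weights call are assumptions about
# the public API, and the shapes follow dummy_input.
#
#   import jax
#   from diffusers import FlaxAutoencoderKL
#   model = FlaxAutoencoderKL(
#       block_out_channels=[32, 64], in_channels=3, out_channels=3,
#       down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#       up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
#   )
#   params = model.init_weights(jax.random.PRNGKey(0))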
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ): '''simple docstring''' lowercase__ : str = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[int] = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : Any = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[int] = vocab_size lowercase__ : Optional[Any] = hidden_size lowercase__ : Any = rotary_dim lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = None lowercase__ : str = vocab_size - 1 lowercase__ : Any = vocab_size - 1 lowercase__ : Dict = vocab_size - 1 def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Any = None if self.use_input_mask: lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Tuple = 20 lowercase__ : int = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.ones((input_ids.shape[0], 
max_decoder_length) , dtype="""i4""") lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : List[str] = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : str = model( input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_) lowercase__ : Dict = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ : Dict = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ : Any = model( input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""") lowercase__ : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , ) lowercase__ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_) lowercase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}') @require_flax class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = FlaxGPTJModelTester(self) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ , lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @tooslow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""") lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_) lowercase__ : 
Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : Optional[Any] = False lowercase__ : List[str] = model.config.eos_token_id lowercase__ : List[Any] = jax.jit(model.generate) lowercase__ : Tuple = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : Any = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : str = 0 lowercase__ : List[Any] = 1 lowercase__ : Dict = 0 lowercase__ : Any = 1 lowercase__ : List[Any] = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = fx_state with torch.no_grad(): lowercase__ : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Dict = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_) lowercase__ : str = fx_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning 
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) lowercase__ : str = pt_model_class(SCREAMING_SNAKE_CASE_).eval() lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa) lowercase__ : Optional[int] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , fx_model.params) lowercase__ , lowercase__ : str = pt_inputs["""input_ids"""].shape lowercase__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_): lowercase__ : Tuple = 0 lowercase__ : int = 1 lowercase__ : str = 0 lowercase__ : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ : Dict = pt_model(**SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , from_flax=SCREAMING_SNAKE_CASE_) with torch.no_grad(): lowercase__ : Tuple = pt_model_loaded(**SCREAMING_SNAKE_CASE_).to_tuple() self.assertEqual( len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_) , """Output lengths differ between Flax and PyTorch""") for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def lowercase__ ( self): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""") lowercase__ : int = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
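# A hedged sketch of the batched generation exercised by the slow test above
# (public transformers Flax API; the 6B checkpoint download is large):
#
#   from transformers import GPT2Tokenizer, FlaxGPTJForCausalLM
#   tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
#   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#   inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
#   sequences = model.generate(
#       inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
#   ).sequences
#   print(tokenizer.batch_decode(sequences, skip_special_tokens=True))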
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = tempfile.mkdtemp() lowercase__ : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens])) lowercase__ : Optional[Any] = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } lowercase__ : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self , **SCREAMING_SNAKE_CASE_): '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' shutil.rmtree(self.tmpdirname) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1)) for x in image_inputs] return image_inputs def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Optional[int] = self.get_rust_tokenizer() lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) processor_slow.save_pretrained(self.tmpdirname) lowercase__ : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) processor_fast.save_pretrained(self.tmpdirname) lowercase__ : Optional[Any] = AlignProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) lowercase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") lowercase__ : Optional[int] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0) lowercase__ : Dict = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.get_image_processor() lowercase__ : Dict = self.get_tokenizer() lowercase__ : Dict = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Any = self.prepare_image_inputs() lowercase__ : List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""np""") lowercase__ : Any = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.get_image_processor() lowercase__ : List[str] = self.get_tokenizer() lowercase__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = """lower newer""" lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE_) lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=64) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Tuple = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Any = """lower newer""" lowercase__ : Union[str, Any] = self.prepare_image_inputs() lowercase__ : List[str] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_) self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""]) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_): processor() def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : str = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : int = processor.batch_decode(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[int] = 
tokenizer.batch_decode(SCREAMING_SNAKE_CASE_) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = self.get_image_processor() lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : Tuple = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_) lowercase__ : Tuple = """lower newer""" lowercase__ : Any = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
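# A hedged end-to-end sketch of the processor pattern under test, using the
# public AlignProcessor API; the checkpoint name and PIL image are illustrative.
#
#   from transformers import AlignProcessor
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
#   # inputs holds input_ids, token_type_ids, attention_mask and pixel_values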
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
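# A hedged usage sketch of the processor above (the class name is the
# reconstruction used here; the backbone checkpoints are illustrative):
#
#   from transformers import AutoImageProcessor, AutoTokenizer
#   processor = VisionTextDualEncoderProcessor(
#       AutoImageProcessor.from_pretrained("google/vit-base-patch16-224"),
#       AutoTokenizer.from_pretrained("bert-base-uncased"),
#   )
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")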
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline __lowerCAmelCase : Optional[int] = ['image'] __lowerCAmelCase : str = ['image'] __lowerCAmelCase : List[str] = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] __lowerCAmelCase : Union[str, Any] = False @property def lowercase__ ( self): '''simple docstring''' return 32 @property def lowercase__ ( self): '''simple docstring''' return 32 @property def lowercase__ ( self): '''simple docstring''' return self.time_input_dim * 4 @property def lowercase__ ( self): '''simple docstring''' return 8 @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : List[str] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ : Any = CLIPVisionModel(SCREAMING_SNAKE_CASE_) return model @property def lowercase__ ( self): '''simple docstring''' lowercase__ : str = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ , do_resize=SCREAMING_SNAKE_CASE_ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , ) return image_processor @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : int = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } lowercase__ : List[Any] = PriorTransformer(**SCREAMING_SNAKE_CASE_) return model @property def lowercase__ ( self): '''simple docstring''' torch.manual_seed(0) lowercase__ : Dict = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } lowercase__ : int = ShapERenderer(**SCREAMING_SNAKE_CASE_) return model def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.dummy_prior lowercase__ : List[str] = self.dummy_image_encoder lowercase__ : Union[str, Any] = self.dummy_image_processor lowercase__ : List[Any] = self.dummy_renderer lowercase__ : Optional[Any] = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , 
use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , ) lowercase__ : Tuple = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, """scheduler""": scheduler, } return components def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0): '''simple docstring''' lowercase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_)).to(SCREAMING_SNAKE_CASE_) if str(SCREAMING_SNAKE_CASE_).startswith("""mps"""): lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_) else: lowercase__ : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(SCREAMING_SNAKE_CASE_) lowercase__ : Union[str, Any] = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = """cpu""" lowercase__ : Dict = self.get_dummy_components() lowercase__ : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : Any = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : Any = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_)) lowercase__ : int = output.images[0] lowercase__ : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ : Optional[int] = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def lowercase__ ( self): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = torch_device == """cpu""" lowercase__ : Union[str, Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.get_dummy_components() lowercase__ : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : List[Any] = 1 lowercase__ : Dict = 2 lowercase__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_) for key in inputs.keys(): if key in self.batch_params: lowercase__ : Tuple = batch_size * [inputs[key]] lowercase__ : Tuple = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self): '''simple docstring''' lowercase__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""") lowercase__ : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""") lowercase__ : Optional[Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""") lowercase__ : List[str] = pipe.to(SCREAMING_SNAKE_CASE_) 
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_) lowercase__ : int = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(0) lowercase__ : Optional[int] = pipe( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
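# A hedged sketch of the image-to-3D pipeline exercised by the slow test above
# (public diffusers API; `image` is an illustrative PIL input):
#
#   from diffusers import ShapEImg2ImgPipeline
#   pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
#   frames = pipe(
#       image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np"
#   ).images[0]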
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with F(1) treated as 0 and F(2) as 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler problem 25: index of the first term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
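# Worked check: the sequence runs 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89,
# 144, ..., so the first 3-digit term is F(12) = 144 and the functions above
# should satisfy:
#
#   fibonacci(12) == 144
#   solution(3) == 12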
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # Tiny BPE vocab/merges so the tokenizer can be built without downloading anything.
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
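

# A minimal usage sketch of the pattern the tests above exercise (assumes torch,
# network access, and the public "CIDAS/clipseg-rd64-refined" checkpoint):
# CLIPSegProcessor routes `text=` to the tokenizer and `images=` to the image
# processor and merges both encodings into a single BatchEncoding.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))  # synthetic stand-in for a real photo
    inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']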
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Create and save a basic cluster config for the local machine, refusing to overwrite an existing file."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually "
            "or pass a different `save_location`."
        )
        return False

    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Detect the accelerator in priority order: CUDA GPUs, then XPUs (if requested), then NPUs, else CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also works using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
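

# A minimal standalone sketch of the forward pass the integration test above checks,
# run on random data instead of a COCO image. Assumes network access; "microsoft/resnet-50"
# is assumed to be the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST.
if __name__ == "__main__":
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    pixel_values = tf.random.uniform((1, 3, 224, 224))  # channels-first, as the model expects
    logits = model(pixel_values=pixel_values).logits
    print(logits.shape)  # (1, 1000)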
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
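

# A minimal usage sketch: instantiate a deliberately tiny ConvBertConfig (the sizes
# below are illustrative, not a released checkpoint) and inspect the dynamic axes
# the ONNX export would use.
if __name__ == "__main__":
    config = ConvBertConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    onnx_config = ConvBertOnnxConfig(config)
    print(onnx_config.inputs)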