code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowercase ( a_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Optional[Any] = KandinskyVaaControlnetPipeline _UpperCamelCase : List[Any] = ["image_embeds", "negative_image_embeds", "hint"] _UpperCamelCase : int = ["image_embeds", "negative_image_embeds", "hint"] _UpperCamelCase : int = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _UpperCamelCase : Any = False @property def __UpperCAmelCase ( self : Any ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim @property def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCAmelCase ( self : Dict ): '''simple docstring''' return 1_00 @property def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Optional[int] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, 
self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _snake_case : str = UNetaDConditionModel(**lowerCamelCase_ ) return model @property def __UpperCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Any = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Dict = self.dummy_unet _snake_case : int = self.dummy_movq _snake_case : Any = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase_ , ) _snake_case : Optional[Any] = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=0 ): '''simple docstring''' _snake_case : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) _snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCamelCase_ ) # create hint _snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) 
, rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) if str(lowerCamelCase_ ).startswith('mps' ): _snake_case : int = torch.manual_seed(lowerCamelCase_ ) else: _snake_case : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) _snake_case : str = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Any = 'cpu' _snake_case : Optional[int] = self.get_dummy_components() _snake_case : Union[str, Any] = self.pipeline_class(**lowerCamelCase_ ) _snake_case : Optional[Any] = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) _snake_case : List[Any] = output.images _snake_case : str = pipe( **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0] _snake_case : Optional[Any] = image[0, -3:, -3:, -1] _snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : Tuple = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Dict = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) _snake_case : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) _snake_case : List[Any] = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0 _snake_case : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) _snake_case : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase_ ) _snake_case : str = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa ) _snake_case : Any = pipeline.to(lowerCamelCase_ ) pipeline.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : str = 'A robot, 4k photo' _snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 ) _snake_case , _snake_case : List[str] = pipe_prior( lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _snake_case : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 ) _snake_case : Optional[Any] = pipeline( image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , hint=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , output_type='np' , ) _snake_case : List[Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
652
import itertools import math def A__( __lowerCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__( ): _snake_case : Optional[Any] = 2 while True: if is_prime(__lowerCAmelCase ): yield num num += 1 def A__( __lowerCAmelCase = 1_00_01 ): return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) ) if __name__ == "__main__": print(F'''{solution() = }''')
652
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def A__( __lowerCAmelCase=None ): _snake_case : Union[str, Any] = argparse.ArgumentParser(add_help=__lowerCAmelCase , allow_abbrev=__lowerCAmelCase ) # The main config parser _snake_case : Optional[int] = config_command_parser(__lowerCAmelCase ) # The subparser to add commands to _snake_case : Dict = config_parser.add_subparsers(title='subcommands' , dest='subcommand' ) # Then add other parsers with the parent parser default_command_parser(__lowerCAmelCase , parents=[parent_parser] ) update_command_parser(__lowerCAmelCase , parents=[parent_parser] ) return config_parser def A__( ): _snake_case : Optional[Any] = get_config_parser() _snake_case : List[str] = config_parser.parse_args() if not hasattr(__lowerCAmelCase , 'func' ): config_parser.print_help() exit(1 ) # Run args.func(__lowerCAmelCase ) if __name__ == "__main__": main()
652
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowercase_ : Dict = '''bert-base-cased''' lowercase_ : Any = '''google/pegasus-xsum''' lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowercase_ : Any = '''patrickvonplaten/t5-tiny-random''' lowercase_ : List[Any] = '''sshleifer/bart-tiny-random''' lowercase_ : Dict = '''sshleifer/tiny-mbart''' lowercase_ : str = '''sshleifer/tiny-marian-en-de''' def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : str = '\n'.join(__lowerCAmelCase ) Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase ) def A__( __lowerCAmelCase ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.source''' ) , __lowerCAmelCase ) _dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.target''' ) , __lowerCAmelCase ) return tmp_dir class lowercase ( a_ ): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in 
SUMMARIES ) _snake_case : Dict = 4 _snake_case : Any = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. _snake_case : int = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , ) _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ): '''simple docstring''' _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) _snake_case : Union[str, Any] = 4 _snake_case : Optional[int] = 
LegacySeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , ) _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines() _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ ) _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()} _snake_case : Dict = {x.name for x in save_dir.iterdir()} _snake_case : str = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCamelCase_ ) < len(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 ) _snake_case : List[str] = 64 _snake_case : str = 
ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ ) _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler] assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _snake_case : List[Any] = [] _snake_case : List[Any] = [] for batch in data_loader: _snake_case : Any = batch['input_ids'].shape _snake_case : str = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _snake_case : int = np.product(batch['input_ids'].shape ) num_src_per_batch.append(lowerCamelCase_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCamelCase_ ) assert num_src_per_batch[0] == max(lowerCamelCase_ ) if failures: raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' ) def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 ) _snake_case : Optional[Any] = 2 _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ ) _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ ) _snake_case : Tuple = tokenizer.pad_token_id def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ): return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) assert sum(count_pad_tokens(lowerCamelCase_ ) ) < 
sum(count_pad_tokens(lowerCamelCase_ ) ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ): '''simple docstring''' if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ): _snake_case : Dict = 'examples/seq2seq/wmt_en_ro' _snake_case : List[Any] = max_len * 2 * 64 if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists(): save_len_file(lowerCamelCase_ , lowerCamelCase_ ) else: _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro' _snake_case : List[Any] = max_len * 4 save_len_file(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : str = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , ) return ds, max_tokens, tokenizer def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case , _snake_case : Any = self._get_dataset() _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) ) _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) ) assert idsa.intersection(lowerCamelCase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ ) if tok_name == MBART_TINY: _snake_case : int = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) _snake_case : 
Optional[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _snake_case : Tuple = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) _snake_case : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
652
1
def A__( __lowerCAmelCase = 1_00 ): _snake_case : List[str] = set() _snake_case : Union[str, Any] = 0 _snake_case : Dict = n + 1 # maximum limit for a in range(2 , __lowerCAmelCase ): for b in range(2 , __lowerCAmelCase ): _snake_case : Optional[Any] = a**b # calculates the current power collect_powers.add(__lowerCAmelCase ) # adds the result to the set return len(__lowerCAmelCase ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
652
from __future__ import annotations def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Dict = list(range(len(__lowerCAmelCase ) ) ) _snake_case : Optional[int] = [v / w for v, w in zip(__lowerCAmelCase , __lowerCAmelCase )] index.sort(key=lambda __lowerCAmelCase : ratio[i] , reverse=__lowerCAmelCase ) _snake_case : float = 0 _snake_case : list[float] = [0] * len(__lowerCAmelCase ) for i in index: if weight[i] <= capacity: _snake_case : List[Any] = 1 max_value += value[i] capacity -= weight[i] else: _snake_case : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
652
1
import numpy as np def A__( __lowerCAmelCase ): return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
652
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase_ : Any = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[Any] = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ : Any = logging.get_logger(__name__) lowercase_ : Any = '''▁''' lowercase_ : str = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } lowercase_ : Dict = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } lowercase_ : Dict = { '''facebook/s2t-small-librispeech-asr''': 1024, } lowercase_ : List[Any] = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] lowercase_ : Optional[Any] = {'''mustc''': MUSTC_LANGS} class lowercase ( a_ ): """simple docstring""" _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Tuple = MAX_MODEL_INPUT_SIZES _UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"] _UpperCamelCase : List[int] = [] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]="<s>" , lowerCamelCase_ : str="</s>" , lowerCamelCase_ : Union[str, Any]="<pad>" , lowerCamelCase_ : int="<unk>" , lowerCamelCase_ : int=False , lowerCamelCase_ : Any=False , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : Optional[Any] , ): '''simple docstring''' _snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , 
do_upper_case=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , lang_codes=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) _snake_case : Union[str, Any] = do_upper_case _snake_case : List[str] = do_lower_case _snake_case : Tuple = load_json(lowerCamelCase_ ) _snake_case : int = {v: k for k, v in self.encoder.items()} _snake_case : List[str] = spm_file _snake_case : Dict = load_spm(lowerCamelCase_ , self.sp_model_kwargs ) if lang_codes is not None: _snake_case : int = lang_codes _snake_case : Optional[Any] = LANGUAGES[lang_codes] _snake_case : List[str] = [f'''<lang:{lang}>''' for lang in self.langs] _snake_case : int = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs} _snake_case : Any = self.lang_tokens _snake_case : Optional[int] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: _snake_case : List[Any] = {} @property def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' return len(self.encoder ) @property def __UpperCAmelCase ( self : Any ): '''simple docstring''' return self._tgt_lang @tgt_lang.setter def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Any ): '''simple docstring''' _snake_case : Any = new_tgt_lang self.set_tgt_lang_special_tokens(lowerCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' _snake_case : Dict = self.lang_code_to_id[tgt_lang] _snake_case : Optional[Any] = [lang_code_id] def __UpperCAmelCase ( self : str , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.encoder.get(lowerCamelCase_ , self.encoder[self.unk_token] ) def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ): '''simple docstring''' return 
self.decoder.get(lowerCamelCase_ , self.unk_token ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : List[str] ): '''simple docstring''' _snake_case : int = [] _snake_case : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: _snake_case : Dict = self.sp_model.decode(lowerCamelCase_ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " _snake_case : Optional[int] = [] else: current_sub_tokens.append(lowerCamelCase_ ) _snake_case : Any = self.sp_model.decode(lowerCamelCase_ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any]=None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) _snake_case : List[str] = [1] * len(self.prefix_tokens ) _snake_case : Optional[Any] = [1] if token_ids_a is None: return prefix_ones + ([0] * len(lowerCamelCase_ )) + suffix_ones return prefix_ones + ([0] * len(lowerCamelCase_ )) + ([0] * len(lowerCamelCase_ )) + suffix_ones def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Any = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ): '''simple docstring''' _snake_case : Union[str, Any] = self.__dict__.copy() _snake_case : int = None return 
state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' _snake_case : Dict = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _snake_case : List[str] = {} _snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs ) def __UpperCAmelCase ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' _snake_case : Optional[int] = Path(lowerCamelCase_ ) assert save_dir.is_dir(), f'''{save_directory} should be a directory''' _snake_case : Any = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _snake_case : Optional[Any] = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , lowerCamelCase_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowerCamelCase_ ) elif not os.path.isfile(self.spm_file ): with open(lowerCamelCase_ , 'wb' ) as fi: _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (str(lowerCamelCase_ ), str(lowerCamelCase_ )) def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Optional[Any] = sentencepiece.SentencePieceProcessor(**__lowerCAmelCase ) spm.Load(str(__lowerCAmelCase ) ) return spm def A__( __lowerCAmelCase ): with open(__lowerCAmelCase , 'r' ) as f: return json.load(__lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase ): with open(__lowerCAmelCase , 'w' ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=2 )
652
import math def A__( __lowerCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__( __lowerCAmelCase = 1_00_01 ): try: _snake_case : int = int(__lowerCAmelCase ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) _snake_case : list[int] = [] _snake_case : List[Any] = 2 while len(__lowerCAmelCase ) < nth: if is_prime(__lowerCAmelCase ): primes.append(__lowerCAmelCase ) num += 1 else: num += 1 return primes[len(__lowerCAmelCase ) - 1] if __name__ == "__main__": print(F'''{solution() = }''')
652
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowercase_ : Optional[int] = pytest.mark.integration lowercase_ : List[str] = {'''comet'''} lowercase_ : Optional[int] = importlib.util.find_spec('''fairseq''') is not None lowercase_ : Any = {'''code_eval'''} lowercase_ : List[Any] = os.name == '''nt''' lowercase_ : int = {'''bertscore''', '''frugalscore''', '''perplexity'''} lowercase_ : int = importlib.util.find_spec('''transformers''') is not None def A__( __lowerCAmelCase ): @wraps(__lowerCAmelCase ) def wrapper(self , __lowerCAmelCase ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , __lowerCAmelCase ) return wrapper def A__( __lowerCAmelCase ): @wraps(__lowerCAmelCase ) def wrapper(self , __lowerCAmelCase ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , __lowerCAmelCase ) return wrapper def A__( __lowerCAmelCase ): @wraps(__lowerCAmelCase ) def wrapper(self , __lowerCAmelCase ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , __lowerCAmelCase ) return wrapper def A__( ): _snake_case : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( a_ , a_ , a_ ) @local class lowercase ( parameterized.TestCase ): """simple docstring""" _UpperCamelCase : List[Any] = {} _UpperCamelCase 
: Optional[Any] = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : Any ): '''simple docstring''' _snake_case : str = '[...]' _snake_case : Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCamelCase_ ) ).module_path ) _snake_case : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCamelCase_ ) # check parameters _snake_case : Union[str, Any] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCamelCase_ , metric_module.__name__ ): with self.use_local_metrics(): try: _snake_case : Optional[Any] = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self : int , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : str = '[...]' _snake_case : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowerCamelCase_ ) ).module_path ) # run doctest with self.use_local_metrics(): _snake_case : int = doctest.testmod(lowerCamelCase_ , verbose=lowerCamelCase_ , raise_on_error=lowerCamelCase_ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCamelCase_ ): yield else: yield @contextmanager def __UpperCAmelCase ( self : str ): 
'''simple docstring''' def load_local_metric(lowerCamelCase_ : Union[str, Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Optional[int] ): return load_metric(os.path.join('metrics' , lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ ) with patch('datasets.load_metric' ) as mock_load_metric: _snake_case : Any = load_local_metric yield @classmethod def __UpperCAmelCase ( cls : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' def wrapper(lowerCamelCase_ : Union[str, Any] ): _snake_case : List[Any] = contextmanager(lowerCamelCase_ ) _snake_case : List[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def A__( __lowerCAmelCase ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class lowercase ( a_ ): """simple docstring""" def __UpperCAmelCase ( self : str , lowerCamelCase_ : Dict ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _snake_case : Union[str, Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def A__( __lowerCAmelCase ): import torch def bert_cos_score_idf(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__lowerCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _snake_case : Optional[int] = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def A__( __lowerCAmelCase ): def load_from_checkpoint(__lowerCAmelCase 
): class lowercase : """simple docstring""" def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' assert len(lowerCamelCase_ ) == 2 _snake_case : int = [0.19, 0.92] return scores, sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _snake_case : int = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _snake_case : Tuple = load_from_checkpoint yield def A__( ): _snake_case : Optional[int] = load_metric(os.path.join('metrics' , 'seqeval' ) ) _snake_case : List[str] = 'ERROR' _snake_case : str = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__lowerCAmelCase , match=re.escape(__lowerCAmelCase ) ): metric.compute(predictions=[] , references=[] , scheme=__lowerCAmelCase )
652
import torch from transformers import AutoModel class lowercase ( torch.nn.Module ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(lowerCamelCase_ , self ).__init__() _snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ ) _snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 ) _snake_case : str = torch.nn.Softmax(dim=1 ) def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ): '''simple docstring''' return self.bert(**lowerCamelCase_ ).last_hidden_state def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=lowerCamelCase_ ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ): '''simple docstring''' return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) ) def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' _snake_case : int = W_supports['sizes'].tolist() _snake_case : int = W_supports['start_token_id'].item() _snake_case : List[str] = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _snake_case : Optional[int] = self.BERT(**lowerCamelCase_ ) _snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ ) _snake_case : Optional[int] = None _snake_case : Optional[int] = None _snake_case : List[str] = W_supports['input_ids'] == start_token_id _snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id for i, size in enumerate(lowerCamelCase_ ): if i == 0: _snake_case : str = 0 else: _snake_case : Union[str, Any] = support_sizes[i - 1] _snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]] _snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] 
_snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _snake_case : Optional[Any] = torch.vstack((p_starts, p_start) ) _snake_case : List[str] = torch.vstack((p_ends, p_end) ) else: _snake_case : Union[str, Any] = p_start _snake_case : Any = p_end return p_starts, p_ends
652
1
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowercase_ : Union[str, Any] = logging.get_logger(__name__) class lowercase ( a_ ): """simple docstring""" _UpperCamelCase : List[str] = "linear" _UpperCamelCase : Optional[int] = "cosine" _UpperCamelCase : Union[str, Any] = "cosine_with_restarts" _UpperCamelCase : Tuple = "polynomial" _UpperCamelCase : Union[str, Any] = "constant" _UpperCamelCase : str = "constant_with_warmup" _UpperCamelCase : Tuple = "piecewise_constant" def A__( __lowerCAmelCase , __lowerCAmelCase = -1 ): return LambdaLR(__lowerCAmelCase , lambda __lowerCAmelCase : 1 , last_epoch=__lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = -1 ): def lr_lambda(__lowerCAmelCase ): if current_step < num_warmup_steps: return float(__lowerCAmelCase ) / float(max(1.0 , __lowerCAmelCase ) ) return 1.0 return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = -1 ): _snake_case : str = {} _snake_case : Optional[int] = step_rules.split(',' ) for rule_str in rule_list[:-1]: _snake_case , _snake_case : Tuple = rule_str.split(':' ) _snake_case : Union[str, Any] = int(__lowerCAmelCase ) _snake_case : Dict = float(__lowerCAmelCase ) _snake_case : Any = value _snake_case : List[str] = float(rule_list[-1] ) def create_rules_function(__lowerCAmelCase , __lowerCAmelCase ): def rule_func(__lowerCAmelCase ) -> float: _snake_case : Tuple = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__lowerCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func _snake_case : Any = create_rules_function(__lowerCAmelCase , __lowerCAmelCase ) return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase=-1 ): def lr_lambda(__lowerCAmelCase ): if current_step < num_warmup_steps: return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.5 , __lowerCAmelCase = -1 ): def lr_lambda(__lowerCAmelCase ): if current_step < num_warmup_steps: return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) ) _snake_case : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = -1 ): def lr_lambda(__lowerCAmelCase ): if current_step < num_warmup_steps: return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) ) _snake_case : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1E-7 , __lowerCAmelCase=1.0 , __lowerCAmelCase=-1 ): _snake_case : List[Any] = optimizer.defaults['lr'] if not (lr_init > lr_end): raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(__lowerCAmelCase ): if current_step < num_warmup_steps: return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init 
# as LambdaLR multiplies by lr_init else: _snake_case : List[Any] = lr_init - lr_end _snake_case : Dict = num_training_steps - num_warmup_steps _snake_case : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps _snake_case : str = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase_ : Optional[Any] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = -1 , ): _snake_case : List[str] = SchedulerType(__lowerCAmelCase ) _snake_case : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__lowerCAmelCase , last_epoch=__lowerCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__lowerCAmelCase , step_rules=__lowerCAmelCase , last_epoch=__lowerCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == 
SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , num_cycles=__lowerCAmelCase , last_epoch=__lowerCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , power=__lowerCAmelCase , last_epoch=__lowerCAmelCase , ) return schedule_func( __lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
652
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase ( a_ , a_ , a_ ): """simple docstring""" @register_to_config def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() _snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = False _snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ ) _snake_case : Union[str, Any] = TaConfig( vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , ) _snake_case : Union[str, Any] = nn.ModuleList() for lyr_num in range(lowerCamelCase_ ): _snake_case : Any = TaBlock(lowerCamelCase_ ) self.encoders.append(lowerCamelCase_ ) _snake_case : Tuple = TaLayerNorm(lowerCamelCase_ ) _snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ): '''simple docstring''' _snake_case : Any = self.token_embedder(lowerCamelCase_ ) _snake_case : List[Any] = encoder_input_tokens.shape[1] _snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device ) x += self.position_encoding(lowerCamelCase_ ) _snake_case : Tuple = self.dropout_pre(lowerCamelCase_ ) # inverted the attention mask _snake_case : Dict = 
encoder_input_tokens.size() _snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ ) for lyr in self.encoders: _snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0] _snake_case : Any = self.layer_norm(lowerCamelCase_ ) return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
652
1
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowercase_ : str = logging.get_logger(__name__) lowercase_ : Optional[int] = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) lowercase_ : Optional[int] = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', 
'''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowercase_ : List[Any] = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowercase_ : Any = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) lowercase_ : Dict = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', 
'''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) lowercase_ : List[str] = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) lowercase_ : Tuple = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) lowercase_ : int = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) lowercase_ : str = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), 
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) lowercase_ : List[str] = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) lowercase_ : Any = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', '''FlaxBertForMultipleChoice'''), ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) lowercase_ : int = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) lowercase_ : Dict = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) lowercase_ : Optional[int] = OrderedDict( [ ('''whisper''', 
'''FlaxWhisperForAudioClassification'''), ] ) lowercase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowercase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowercase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowercase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowercase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowercase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowercase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowercase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowercase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowercase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowercase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowercase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowercase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowercase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : List[Any] = FLAX_MODEL_MAPPING lowercase_ : Optional[int] = auto_class_update(FlaxAutoModel) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : List[str] = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowercase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') 
class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowercase_ : str = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowercase_ : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase_ : List[str] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowercase_ : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase_ : Optional[Any] = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowercase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : int = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowercase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, 
head_doc='''next sentence prediction''' ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : List[str] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : List[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowercase_ : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class lowercase ( _BaseAutoModelClass ): """simple docstring""" _UpperCamelCase : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowercase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
652
def A__( __lowerCAmelCase ): assert column_title.isupper() _snake_case : List[Any] = 0 _snake_case : List[str] = len(__lowerCAmelCase ) - 1 _snake_case : Dict = 0 while index >= 0: _snake_case : List[str] = (ord(column_title[index] ) - 64) * pow(26 , __lowerCAmelCase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
652
1
from collections.abc import Sequence


def A__( __lowerCAmelCase = None ):
    """Return the largest sum over all (not necessarily contiguous) subsequences.

    Args:
        __lowerCAmelCase: a non-empty sequence of numbers.

    Returns:
        The maximum subsequence sum (for all-negative input this is the
        largest single element; otherwise the sum of the positive elements).

    Raises:
        ValueError: if the input is ``None`` or empty.
    """
    # Bug fix: the body previously used the undefined name `nums` while the
    # parameter was `__lowerCAmelCase`, raising NameError on every call.
    nums = __lowerCAmelCase
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Bug fix: the original computed max(<whole list>, ans + num, <whole list>),
        # comparing a list with ints (TypeError).  The intended recurrence keeps the
        # best of: starting fresh at `num`, extending with `num`, or standing pat.
        ans = max(num, ans + num, ans)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    # Bug fix: the results of the two input() calls were both bound to `lowercase_`
    # while the code below referenced the undefined names `n`, `array` and
    # `max_subsequence_sum`; the names now line up with their uses.
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(A__(array))
652
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


# Bug fix: these three constants were all bound to the same placeholder name
# (`lowercase_`) while the decorator and `_info()` below reference `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION`, which raised NameError at import time.
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
'''

_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """TER (Translation Edit Rate) metric, delegating to sacrebleu's TER implementation.

    Bug fix: both methods below were originally named `__UpperCAmelCase`, so the
    second definition shadowed the first and neither matched the `datasets.Metric`
    API; they are restored to the `_info`/`_compute` names the framework calls.
    `_compute` also had every parameter named `lowerCamelCase_` (a SyntaxError);
    the parameter names now match the documented keyword arguments.
    """

    def _info(self):
        """Describe the metric's inputs/outputs; fail early on too-old sacrebleu."""
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER over `predictions` against per-segment `references`.

        `references` is a list (one entry per prediction) of equal-length lists of
        reference strings; it is transposed into sacrebleu's expected layout.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # Transpose: sacrebleu wants one stream per reference position, not per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
652
1
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPTaConfig,
        TaConfig,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeqaSeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPTaLMHeadModel,
        RobertaForMaskedLM,
        TaForConditionalGeneration,
    )


@is_pt_tf_cross_test
class lowercase(unittest.TestCase):
    """Cross-framework checkpoint loading tests for the TF/PT auto-model classes.

    Bug fixes relative to the original:
    - every test method was named `__UpperCAmelCase`, so each definition shadowed
      the previous one and none was collected by unittest; the methods now carry
      unique `test_*` names.
    - the last two methods referenced the unbound name `lowerCamelCase_`
      (NameError), and many assertions called `assertIsInstance(x, x)` (TypeError:
      second argument must be a type); assertions now target the concrete config
      and model classes imported above, and `from_pt`/`from_tf`/
      `output_loading_info` receive proper booleans instead of the loop variable.
    """

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
652
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Bug fix: both sentinels were bound to the same placeholder name `lowercase_`
# while `set_partitions` below references `_unmatched` (NameError).  The names
# now match their uses.
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


# Bug fix: all four functions were named `A__`, so each definition shadowed the
# previous one, and the internal calls to `_match`, `_replacement_rules` and
# `_get_partition_rules` hit undefined names.  They are restored to the names
# the module actually calls.
def _match(qs, ks):
    """Return True if the regexes `qs` fully match any consecutive window of `ks`."""
    # Compile each pattern with a '$' anchor so only complete-component matches count.
    qts = tuple((re.compile(x + '$') for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Return a (key, val) -> replacement function driven by `rules`.

    The first rule whose pattern tuple matches the key wins; otherwise `val`
    is returned unchanged.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules for a GPT-style transformer: (key-pattern, PartitionSpec) pairs."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Map every leaf of `in_dict` to its PartitionSpec according to the rules.

    Raises AssertionError if any parameter key is not covered by a rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
1
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def A__(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve ``A @ x = b`` approximately with the Jacobi iteration method.

    Bug fixes relative to the original: the signature declared four parameters
    all named ``__lowerCAmelCase`` (a SyntaxError), the body referenced the
    undefined names ``table``/``rows``/``cols`` etc., and ``numpy.floataa``
    does not exist (restored to ``float64``).

    Args:
        coefficient_matrix: strictly diagonally dominant n x n matrix ``A``.
        constant_matrix: n x 1 column vector ``b``.
        init_val: initial guess for the n unknowns.
        iterations: number of Jacobi sweeps to perform (must be >= 1).

    Returns:
        list[float]: the approximation after the final sweep.

    Raises:
        ValueError: on dimension mismatches, ``iterations <= 0``, or a
            coefficient matrix that is not strictly diagonally dominant.
    """
    rowsa, colsa = coefficient_matrix.shape
    rowsa2, colsa2 = constant_matrix.shape

    if rowsa != colsa:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(msg)

    if colsa2 != 1:
        msg = f'''Constant matrix must be nx1 but received {rowsa2}x{colsa2}'''
        raise ValueError(msg)

    if rowsa != rowsa2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'''received {rowsa}x{colsa} and {rowsa2}x{colsa2}'''
        )
        raise ValueError(msg)

    if len(init_val) != rowsa:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'''matrix but received {len(init_val)} and {rowsa}'''
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    # Augmented matrix [A | b]; the last column holds the constants.
    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal entry A[row][row]
                elif col == cols - 1:
                    val = table[row][col]  # constant b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that the coefficient part of the augmented matrix is strictly
    diagonally dominant; raise ValueError otherwise.

    Bug fixes relative to the original: the function was named ``A__`` (which
    shadowed the solver above and left the solver's internal call unresolved)
    and its body referenced ``table`` while the parameter was named
    ``__lowerCAmelCase``.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        # Sum off-diagonal coefficients (last column is the constants vector).
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


# Bug fix: the logger was bound to `lowercase_` while every function below calls
# `logger.*` (NameError); likewise all functions were named `A__` with duplicate
# `__lowerCAmelCase` parameters (SyntaxError), while the call sites reference
# `save_model`, `entropy`, `print_ad_tensor`, `compute_heads_importance`,
# `mask_heads`, `prune_heads` and `main`.  Names are restored throughout to
# match their uses; mangled numpy dtype `np.intaa` is restored to `np.int64`.
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` into `dirpath`, replacing any previous checkpoint files there."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of `p` along the last axis.

    If `unlogit` is True, `p` is squared first (the exponent 2 comes from the
    original code).  Zero probabilities contribute zero (0 * log 0 := 0).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_ad_tensor(tensor):
    """Log a 2D tensor as a tab-separated layer-by-head table."""
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over `eval_dataloader` and accumulate per-head attention
    entropy and head importance (|gradient of the loss w.r.t. the head mask|).

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_ad_tensor(head_importance)
        logger.info('Head ranked by importance scores')
        head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
        # NOTE(review): the mangled original dropped the indexed assignment here;
        # restored so that each head's rank (by descending importance) is recorded.
        head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
            head_importance.numel(), device=args.device
        )
        head_ranks = head_ranks.view_as(head_importance)
        print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the LM score drops
    below `masking_threshold * original_score`; return the final head mask."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 1_00,
        )

    logger.info('Final head mask')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads masked out in `head_mask`, compare score and
    speed before/after pruning, and save the pruned model."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head squeezes down to a scalar; re-wrap it in a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 1_00,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 1_00)
    save_model(model, args.output_dir)


def main():
    """Parse arguments, load GPT-2 and the evaluation data, then compute head
    importance and optionally mask/prune heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir',
        default=None,
        type=str,
        required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path',
        default=None,
        type=str,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )

    # Other parameters
    parser.add_argument(
        '--config_name',
        default='',
        type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name',
        default='',
        type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir',
        default=None,
        type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory'
    )
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
    )
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        '--dont_normalize_global_importance',
        action='store_true',
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.'
    )
    parser.add_argument(
        '--masking_threshold',
        default=0.9,
        type=float,
        help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).',
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.'
    )
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length',
        default=1_28,
        type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
652
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowercase : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=13 , lowerCamelCase_ : int=30 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Optional[Any]=37 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[str]=10 , lowerCamelCase_ : List[str]=0.02 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : str=2 , ): '''simple docstring''' _snake_case : str = parent _snake_case : List[Any] = batch_size _snake_case : Dict = image_size _snake_case : Any = patch_size _snake_case : Any = num_channels _snake_case : str = is_training _snake_case : Any = use_labels _snake_case : Union[str, Any] = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : Any = intermediate_size _snake_case : str = hidden_act _snake_case 
: Tuple = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Union[str, Any] = type_sequence_label_size _snake_case : List[Any] = initializer_range _snake_case : Optional[int] = scope _snake_case : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _snake_case : Tuple = (image_size // patch_size) ** 2 _snake_case : Optional[int] = num_patches + 2 def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : Any = None if self.use_labels: _snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ): '''simple docstring''' _snake_case : Dict = TFDeiTModel(config=lowerCamelCase_ ) _snake_case : Tuple = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ): '''simple docstring''' _snake_case : Tuple 
= TFDeiTForMaskedImageModeling(config=lowerCamelCase_ ) _snake_case : Dict = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _snake_case : Dict = 1 _snake_case : List[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ ) _snake_case : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __UpperCAmelCase ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : List[Any] = self.type_sequence_label_size _snake_case : Dict = TFDeiTForImageClassification(lowerCamelCase_ ) _snake_case : Tuple = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case : Optional[int] = 1 _snake_case : List[Any] = TFDeiTForImageClassification(lowerCamelCase_ ) _snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case : List[str] = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : str = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Union[str, Any] = config_and_inputs _snake_case : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowercase ( a_ , a_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) 
_UpperCamelCase : int = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _UpperCamelCase : Optional[int] = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Tuple = False _UpperCamelCase : Optional[int] = False def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Any = TFDeiTModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __UpperCAmelCase ( self : Dict ): '''simple docstring''' pass def __UpperCAmelCase ( self : str ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[Any] = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _snake_case : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Dense ) ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(lowerCamelCase_ ) _snake_case : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : str = [*signature.parameters.keys()] _snake_case : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=False ): '''simple docstring''' _snake_case : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Union[str, Any] = TFDeiTModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def A__( ): _snake_case : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) _snake_case : Any = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Dict = image_processor(images=lowerCamelCase_ , return_tensors='tf' ) # forward pass 
_snake_case : Any = model(**lowerCamelCase_ ) # verify the logits _snake_case : Tuple = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _snake_case : Optional[int] = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
652
def A__( __lowerCAmelCase ):
    """Return the largest number obtainable by deleting exactly one digit.

    The sign of the input is ignored (the absolute value is processed).

    Args:
        __lowerCAmelCase: the integer to process.

    Returns:
        The maximum integer produced by removing a single digit, or 0 when
        the input has only one digit (removing it leaves nothing).

    Raises:
        TypeError: if the input is not an ``int``.

    >>> A__(152)
    52
    >>> A__(-2736)
    736
    """
    # Validate against ``int`` explicitly; the previous code compared the
    # value against itself (``isinstance(x, x)``), which raises TypeError
    # for every input because isinstance's second argument must be a type.
    if not isinstance(__lowerCAmelCase, int):
        raise TypeError('only integers accepted as input')
    digits = str(abs(__lowerCAmelCase))
    # Removing the only digit of a one-digit number would leave the empty
    # string, which ``int`` cannot parse -- treat that result as 0.
    if len(digits) == 1:
        return 0
    # One candidate per position: the number with that digit dropped.
    # (The previous code popped by the *input value* instead of the index
    # and joined the wrong variable.)
    candidates = (
        digits[:index] + digits[index + 1:] for index in range(len(digits))
    )
    return max(int(candidate) for candidate in candidates)


if __name__ == "__main__":
    __import__('doctest').testmod()
652
1
# Lazy-import bootstrap for the BlenderbotSmall model family.  The heavy
# backend-specific submodules (PyTorch / TensorFlow / Flax) are imported only
# on first attribute access via ``_LazyModule``; each optional backend is
# guarded by an availability check below.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodules that are always importable (pure Python): the configuration and
# the slow tokenizer.
lowercase_ : List[str] = {
    '''configuration_blenderbot_small''': [
        '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BlenderbotSmallConfig''',
        '''BlenderbotSmallOnnxConfig''',
    ],
    '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}

# Fast (Rust-backed) tokenizer -- available only when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = ['''BlenderbotSmallTokenizerFast''']

# PyTorch implementations.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : str = [
        '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BlenderbotSmallForCausalLM''',
        '''BlenderbotSmallForConditionalGeneration''',
        '''BlenderbotSmallModel''',
        '''BlenderbotSmallPreTrainedModel''',
    ]

# TensorFlow implementations.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : List[Any] = [
        '''TFBlenderbotSmallForConditionalGeneration''',
        '''TFBlenderbotSmallModel''',
        '''TFBlenderbotSmallPreTrainedModel''',
    ]

# Flax implementations.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[int] = [
        '''FlaxBlenderbotSmallForConditionalGeneration''',
        '''FlaxBlenderbotSmallModel''',
        '''FlaxBlenderbotSmallPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports; at runtime this
    # branch is skipped and the lazy module below takes over.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): ``_import_structure`` is referenced here but never
    # assigned anywhere above in this chunk -- each backend's export list is
    # bound to ``lowercase_`` instead of being stored under a key of a
    # structure dict.  Presumably the original populated ``_import_structure``;
    # verify before relying on this module (as written it raises NameError).
    lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowercase_ : Tuple = logging.getLogger(__name__) class lowercase ( a_ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ): '''simple docstring''' _snake_case : str = label_idx def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ): '''simple docstring''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ): _snake_case : Union[str, Any] = mode.value _snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' ) _snake_case : Dict = 1 _snake_case : List[str] = [] with open(lowerCamelCase_ , encoding='utf-8' ) as f: _snake_case : List[Any] = [] _snake_case : int = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) ) guid_index += 1 _snake_case : Optional[Any] = [] _snake_case : Union[str, Any] = [] else: _snake_case : Tuple = line.split(' ' ) words.append(splits[0] ) if len(lowerCamelCase_ ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) ) return examples def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ): '''simple docstring''' _snake_case : str = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(lowerCamelCase_ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(lowerCamelCase_ ) else: 
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] ) def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ): '''simple docstring''' if path: with open(lowerCamelCase_ , 'r' ) as f: _snake_case : Optional[int] = f.read().splitlines() if "O" not in labels: _snake_case : Optional[int] = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class lowercase ( a_ ): """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' super().__init__(label_idx=-2 ) def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ): '''simple docstring''' if path: with open(lowerCamelCase_ , 'r' ) as f: _snake_case : str = f.read().splitlines() if "O" not in labels: _snake_case : Union[str, Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class lowercase ( a_ ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ): '''simple docstring''' if isinstance(lowerCamelCase_ , lowerCamelCase_ ): _snake_case : str = mode.value _snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' ) _snake_case : Tuple = 1 _snake_case : List[str] = [] with open(lowerCamelCase_ , encoding='utf-8' ) as f: for sentence in parse_incr(lowerCamelCase_ ): _snake_case : List[str] = [] _snake_case : str = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) if words: examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) ) guid_index += 1 return examples def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , 
lowerCamelCase_ : TextIO , lowerCamelCase_ : List ): '''simple docstring''' _snake_case : Dict = 0 for sentence in parse_incr(lowerCamelCase_ ): _snake_case : Optional[int] = preds_list[example_id] _snake_case : List[Any] = '' for token in sentence: out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) ''' out += "\n" writer.write(lowerCamelCase_ ) example_id += 1 def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ): '''simple docstring''' if path: with open(lowerCamelCase_ , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
652
1
# Lazy-import bootstrap for the GPT-Neo model family.  Only the configuration
# is always importable; the PyTorch and Flax implementations are gated behind
# availability checks and loaded on first attribute access via ``_LazyModule``.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Always-importable (pure Python) submodule: the configuration.
lowercase_ : Tuple = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}

# PyTorch implementations.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : str = [
        '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoForCausalLM''',
        '''GPTNeoForQuestionAnswering''',
        '''GPTNeoForSequenceClassification''',
        '''GPTNeoForTokenClassification''',
        '''GPTNeoModel''',
        '''GPTNeoPreTrainedModel''',
        '''load_tf_weights_in_gpt_neo''',
    ]

# Flax implementations.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Union[str, Any] = [
        '''FlaxGPTNeoForCausalLM''',
        '''FlaxGPTNeoModel''',
        '''FlaxGPTNeoPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports; at runtime this
    # branch is skipped and the lazy module below takes over.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # NOTE(review): ``_import_structure`` is referenced here but never
    # assigned above in this chunk -- the export lists are each bound to
    # ``lowercase_`` instead of keys of a structure dict.  Presumably the
    # original populated ``_import_structure``; verify before relying on
    # this module (as written it raises NameError).
    lowercase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Tuple = 1 _snake_case : str = 3 _snake_case : List[str] = (32, 32) _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def __UpperCAmelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCAmelCase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , 
num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(lowerCamelCase_ ) @property def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ): class lowercase : """simple docstring""" def __init__( self : Tuple ): '''simple docstring''' _snake_case : List[str] = torch.ones([0] ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ): '''simple docstring''' self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : int = self.dummy_cond_unet _snake_case : str = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) _snake_case : Union[str, Any] = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Union[str, Any] = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[str] = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Union[str, Any] = output.images _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Any = 
sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0] _snake_case : Tuple = image[0, -3:, -3:, -1] _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : List[str] = self.dummy_cond_unet _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : int = self.dummy_vae _snake_case : List[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Any = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : str = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Optional[Any] = output.images _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , 
return_dict=lowerCamelCase_ , )[0] _snake_case : Dict = image[0, -3:, -3:, -1] _snake_case : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert isinstance(pipe.scheduler , lowerCamelCase_ ) assert pipe.safety_checker is None _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.dummy_cond_unet _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : Any = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 _snake_case : str = unet.half() _snake_case : Union[str, Any] = vae.half() _snake_case : Dict = bert.half() # make sure here that pndm scheduler skips prk _snake_case : List[str] = 
StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Tuple = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Optional[int] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) _snake_case : List[str] = 40_03_66_03_46 _snake_case : int = 7 # without safety guidance (sld_guidance_scale = 0) _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Union[str, Any] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : str = output.images _snake_case : Dict = image[0, -3:, 
-3:, -1] _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ ) _snake_case : int = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : Tuple = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity' _snake_case : Optional[Any] = 27_34_97_17_55 _snake_case : Union[str, Any] = 7 _snake_case : Dict = torch.manual_seed(lowerCamelCase_ ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Any = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 _snake_case 
: Optional[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' 
' leyendecker' ) _snake_case : Union[str, Any] = 10_44_35_52_34 _snake_case : Dict = 12 _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Optional[int] = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Optional[int] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
652
1
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowercase_ : Dict = logging.get_logger(__name__) # General docstring lowercase_ : Optional[int] = '''RegNetConfig''' # Base docstring lowercase_ : List[Any] = '''facebook/regnet-y-040''' lowercase_ : int = [1, 1088, 7, 7] # Image classification docstring lowercase_ : Optional[int] = '''facebook/regnet-y-040''' lowercase_ : List[str] = '''tabby, tabby cat''' lowercase_ : int = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[str] = "relu" , **lowerCamelCase_ : Dict , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _snake_case : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _snake_case : Tuple = tf.keras.layers.ConvaD( filters=lowerCamelCase_ , kernel_size=lowerCamelCase_ , strides=lowerCamelCase_ , padding='VALID' , groups=lowerCamelCase_ , use_bias=lowerCamelCase_ , name='convolution' , ) _snake_case : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) _snake_case : Dict = 
ACTaFN[activation] if activation is not None else tf.identity def __UpperCAmelCase ( self : str , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' _snake_case : Any = self.convolution(self.padding(lowerCamelCase_ ) ) _snake_case : Optional[int] = self.normalization(lowerCamelCase_ ) _snake_case : Tuple = self.activation(lowerCamelCase_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : List[Any] ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : str = config.num_channels _snake_case : Tuple = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' _snake_case : int = shape_list(lowerCamelCase_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _snake_case : int = tf.transpose(lowerCamelCase_ , perm=(0, 2, 3, 1) ) _snake_case : Optional[int] = self.embedder(lowerCamelCase_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : int = tf.keras.layers.ConvaD( filters=lowerCamelCase_ , kernel_size=1 , strides=lowerCamelCase_ , use_bias=lowerCamelCase_ , name='convolution' ) _snake_case : Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' ) def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False ): '''simple docstring''' return self.normalization(self.convolution(lowerCamelCase_ ) , training=lowerCamelCase_ ) class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , **lowerCamelCase_ : List[Any] ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name='pooler' ) _snake_case : int = [ tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowerCamelCase_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' _snake_case : int = self.pooler(lowerCamelCase_ ) for layer_module in self.attention: _snake_case : Dict = layer_module(lowerCamelCase_ ) _snake_case : Tuple = hidden_state * pooled return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int 
, lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : List[Any] = in_channels != out_channels or stride != 1 _snake_case : Tuple = max(1 , out_channels // config.groups_width ) _snake_case : List[str] = ( TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. _snake_case : Optional[int] = [ TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name='layer.2' ), ] _snake_case : int = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : int , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : Dict = hidden_state for layer_module in self.layers: _snake_case : Union[str, Any] = layer_module(lowerCamelCase_ ) _snake_case : Optional[Any] = self.shortcut(lowerCamelCase_ ) hidden_state += residual _snake_case : Union[str, Any] = self.activation(lowerCamelCase_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : List[str] = in_channels != out_channels or stride != 1 _snake_case : Optional[int] = max(1 , out_channels // config.groups_width ) _snake_case : Optional[Any] = ( TFRegNetShortCut(lowerCamelCase_ , stride=lowerCamelCase_ , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) 
_snake_case : Union[str, Any] = [ TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ , name='layer.3' ), ] _snake_case : List[Any] = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : str , lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = hidden_state for layer_module in self.layers: _snake_case : str = layer_module(lowerCamelCase_ ) _snake_case : int = self.shortcut(lowerCamelCase_ ) hidden_state += residual _snake_case : Union[str, Any] = self.activation(lowerCamelCase_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : RegNetConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer _snake_case : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , name='layers.0' ), *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' for layer_module in self.layers: _snake_case : str = layer_module(lowerCamelCase_ ) return hidden_state class lowercase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : RegNetConfig , **lowerCamelCase_ : List[Any] 
): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : int = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) _snake_case : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ , name=f'''stages.{i+1}''' ) ) def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True ): '''simple docstring''' _snake_case : Optional[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case : Dict = hidden_states + (hidden_state,) _snake_case : Tuple = stage_module(lowerCamelCase_ ) if output_hidden_states: _snake_case : str = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ ) @keras_serializable class lowercase ( tf.keras.layers.Layer ): """simple docstring""" _UpperCamelCase : Union[str, Any] = RegNetConfig def __init__( self : Dict , lowerCamelCase_ : Tuple , **lowerCamelCase_ : str ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) _snake_case : Optional[int] = config _snake_case : Tuple = TFRegNetEmbeddings(lowerCamelCase_ , name='embedder' ) _snake_case : Optional[Any] = TFRegNetEncoder(lowerCamelCase_ , name='encoder' ) _snake_case : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase_ , name='pooler' ) @unpack_inputs def 
__UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : bool = False , ): '''simple docstring''' _snake_case : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : List[str] = self.embedder(lowerCamelCase_ , training=lowerCamelCase_ ) _snake_case : Union[str, Any] = self.encoder( lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ ) _snake_case : Optional[int] = encoder_outputs[0] _snake_case : List[Any] = self.pooler(lowerCamelCase_ ) # Change to NCHW output format have uniformity in the modules _snake_case : int = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) ) _snake_case : Dict = tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _snake_case : str = tuple([tf.transpose(lowerCamelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowercase ( a_ ): """simple docstring""" _UpperCamelCase : str = RegNetConfig _UpperCamelCase : Tuple = "regnet" _UpperCamelCase : List[str] = "pixel_values" @property def __UpperCAmelCase ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )} lowercase_ : int = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. 
Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' lowercase_ : List[str] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, a_ , ) class lowercase ( a_ ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) _snake_case : Any = TFRegNetMainLayer(lowerCamelCase_ , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowerCamelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : tf.Tensor , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[int]=False , ): '''simple docstring''' _snake_case : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : List[str] = self.regnet( pixel_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , a_ , ) class lowercase ( a_ , a_ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : RegNetConfig , *lowerCamelCase_ : Dict , **lowerCamelCase_ : int ): '''simple docstring''' super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) _snake_case : Any = config.num_labels _snake_case : Optional[int] = TFRegNetMainLayer(lowerCamelCase_ , name='regnet' ) # classification head _snake_case : Any = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowerCamelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : tf.Tensor = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Tuple=False , ): '''simple docstring''' _snake_case : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : Optional[Any] = self.regnet( lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ , training=lowerCamelCase_ ) _snake_case : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1] _snake_case : Tuple = self.classifier[0](lowerCamelCase_ ) _snake_case : Tuple = self.classifier[1](lowerCamelCase_ ) _snake_case : Dict = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase_ , logits=lowerCamelCase_ ) if not return_dict: _snake_case : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowerCamelCase_ , logits=lowerCamelCase_ , 
hidden_states=outputs.hidden_states )
652
import functools


def A__(days, costs):
    """Return the minimum total cost of travel passes covering every day in *days*.

    ``days`` lists the days of the year (1-based, < 366) on which travel happens;
    ``costs`` holds exactly three pass prices: [1-day, 7-day, 30-day].

    Raises:
        ValueError: if ``days`` is not a list of ints, ``costs`` is not a list of
            three ints, or any day is outside the range 1..365.

    >>> A__([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    """
    # --- validation (behaviour kept identical to the original) ---
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)  # O(1) membership test per day of the year

    @functools.cache
    def dynamic_programming(index):
        # Past the end of the year: nothing left to pay for.
        if index > 365:
            return 0
        # Not a travel day: advance for free.
        if index not in days_set:
            return dynamic_programming(index + 1)
        # Travel day: take the cheapest of the three pass options.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def A__( __lowerCAmelCase ): if isinstance(__lowerCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class lowercase : """simple docstring""" def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' pass def __UpperCAmelCase ( self : Any ): '''simple docstring''' pass def __UpperCAmelCase ( self : Any ): '''simple docstring''' pass def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : int ): '''simple docstring''' _snake_case : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : str = TFVisionTextDualEncoderModel(lowerCamelCase_ ) _snake_case : Union[str, Any] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) 
) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' _snake_case , _snake_case : Tuple = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ ) _snake_case : Optional[int] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str=None , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case , _snake_case : Union[str, Any] = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = {'vision_model': vision_model, 'text_model': text_model} _snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase_ ) _snake_case : Dict = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[Any] ): '''simple 
docstring''' _snake_case , _snake_case : int = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : List[str] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ ) _snake_case : Optional[Any] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) _snake_case : int = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ ) _snake_case : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase_ ) _snake_case : Optional[int] = model(input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ ) _snake_case : Optional[int] = after_output[0].numpy() _snake_case : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1e-5 ) def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int=None , **lowerCamelCase_ : int ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ ) _snake_case : int = model( input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , output_attentions=lowerCamelCase_ ) _snake_case : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case : str = to_atuple(vision_model.config.image_size ) _snake_case : Optional[int] = to_atuple(vision_model.config.patch_size ) _snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _snake_case : int = num_patches + 1 
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _snake_case : Optional[int] = output.text_model_output.attentions self.assertEqual(len(lowerCamelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __UpperCAmelCase ( self : str , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : float ): '''simple docstring''' _snake_case : Optional[int] = np.abs((a - b) ).max() self.assertLessEqual(lowerCamelCase_ , lowerCamelCase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowerCamelCase_ ) def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Optional[int] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCamelCase_ ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase_ ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : int = self.prepare_config_and_inputs() self.check_save_load(**lowerCamelCase_ ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCamelCase_ ) @slow def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case , _snake_case : Union[str, Any] = self.get_pretrained_model_and_inputs() _snake_case : List[Any] = model_a(**lowerCamelCase_ ) _snake_case : Tuple = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCamelCase_ ) _snake_case : Any = 
TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase_ ) _snake_case : Optional[Any] = model_a(**lowerCamelCase_ ) _snake_case : List[Any] = after_outputs[0].numpy() _snake_case : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1e-5 ) @require_tf class lowercase ( a_ , unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' ) _snake_case : str = 13 _snake_case : Dict = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _snake_case : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _snake_case : Any = random_attention_mask([batch_size, 4] ) _snake_case : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCAmelCase ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Dict ): '''simple docstring''' _snake_case : int = TFViTModel(lowerCamelCase_ , name='vision_model' ) _snake_case : Any = TFBertModel(lowerCamelCase_ , name='text_model' ) return vision_model, text_model def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = TFViTModelTester(self ) _snake_case : Union[str, Any] = TFBertModelTester(self ) _snake_case : str = vit_model_tester.prepare_config_and_inputs() _snake_case : Optional[int] = bert_model_tester.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Dict = vision_config_and_inputs ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": 
pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowercase ( a_ , unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' ) _snake_case : Dict = 13 _snake_case : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _snake_case : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _snake_case : Any = random_attention_mask([batch_size, 4] ) _snake_case : Optional[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any=None , **lowerCamelCase_ : List[str] ): '''simple docstring''' _snake_case , _snake_case : Dict = self.get_vision_text_model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Tuple = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase_ , text_model=lowerCamelCase_ ) _snake_case : str = model( input_ids=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , output_attentions=lowerCamelCase_ ) _snake_case : Any = output.vision_model_output.attentions self.assertEqual(len(lowerCamelCase_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _snake_case : int = to_atuple(vision_model.config.image_size ) _snake_case : int = to_atuple(vision_model.config.patch_size ) _snake_case 
: List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _snake_case : Tuple = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _snake_case : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowerCamelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): '''simple docstring''' _snake_case : Any = TFDeiTModel(lowerCamelCase_ , name='vision_model' ) _snake_case : Optional[Any] = TFRobertaModel(lowerCamelCase_ , name='text_model' ) return vision_model, text_model def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Tuple = TFDeiTModelTester(self ) _snake_case : Tuple = TFRobertaModelTester(self ) _snake_case : str = vit_model_tester.prepare_config_and_inputs() _snake_case : Optional[int] = bert_model_tester.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Optional[Any] = vision_config_and_inputs ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Union[str, Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowercase ( a_ , unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : str ): '''simple docstring''' _snake_case : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' ) _snake_case : 
List[str] = 13 _snake_case : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _snake_case : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _snake_case : Tuple = random_attention_mask([batch_size, 4] ) _snake_case : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = TFCLIPVisionModel(lowerCamelCase_ , name='vision_model' ) _snake_case : Optional[int] = TFBertModel(lowerCamelCase_ , name='text_model' ) return vision_model, text_model def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : List[str] = TFCLIPVisionModelTester(self ) _snake_case : Optional[Any] = TFBertModelTester(self ) _snake_case : Optional[Any] = clip_model_tester.prepare_config_and_inputs() _snake_case : Any = bert_model_tester.prepare_config_and_inputs() _snake_case , _snake_case : Dict = vision_config_and_inputs ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Optional[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowercase ( unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Tuple = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=lowerCamelCase_ ) _snake_case : int = 
VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) _snake_case : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _snake_case : str = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='np' ) _snake_case : int = model(**lowerCamelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _snake_case : Optional[int] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCamelCase_ , atol=1e-3 ) )
652
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


lowercase_ = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias of :class:`SegformerImageProcessor`.

    Kept only for backward compatibility; scheduled for removal in v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn with an explicit FutureWarning category (the flattened source had
        # lost it) so callers migrate to SegformerImageProcessor in time.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
652
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# Sentencepiece word-boundary marker character.
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (backed by HuggingFace *tokenizers*) ALBERT tokenizer.

    Based on Unigram/sentencepiece; mirrors the slow ``AlbertTokenizer`` API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space
        # before it, so lstrip=True / rstrip=False / not normalized.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Without the sentencepiece model file we cannot rebuild a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model input by adding [CLS]/[SEP]: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` if present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into *save_directory*; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
652
from math import factorial


def A__(successes, trials, prob):
    """Return the binomial pmf ``P(X = successes)`` for ``X ~ Binomial(trials, prob)``.

    Args:
        successes: number of successful outcomes (non-negative int, <= trials).
        trials: number of independent trials (non-negative int).
        prob: success probability of a single trial, strictly between 0 and 1.

    Raises:
        ValueError: on any violation of the above constraints.
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob ** successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # NOTE(review): flattened source called an undefined ``binomial_distribution``
    # here; the only matching definition in this module is ``A__``.
    print(A__(2, 4, 0.75))
652
1
def A__(collection):
    """Sort *collection* in place using circle sort and return it.

    Repeatedly performs "circle" passes (comparing/swapping elements mirrored
    around the centre of each sub-range, then recursing into both halves)
    until a full pass makes no swap.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high):
        """One circle pass over ``collection[low:high + 1]``; True if any swap happened."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        # Compare elements mirrored around the centre of the range.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # Odd-sized range: the middle element still faces its right neighbour.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    # NOTE(review): flattened source called an undefined ``circle_sort`` here;
    # the only matching definition in this module is ``A__``.
    print(A__(unsorted))
652
# Cell injected at the top of every auto-generated doc notebook: installs the
# libraries the examples need (with a commented-out install-from-source variant).
lowercase_ = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells of the generated notebook.
# NOTE(review): the flattened source referenced an undefined ``INSTALL_CONTENT``
# here; the only value it can mean is the install string bound just above, so we
# read ``lowercase_`` before it is rebound.
lowercase_ = [{"type": "code", "content": lowercase_}]

# Placeholder substitutions applied when rendering doc templates.
lowercase_ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
652
1
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Tuple = 1 _snake_case : str = 3 _snake_case : List[str] = (32, 32) _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def __UpperCAmelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCAmelCase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , 
num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(lowerCamelCase_ ) @property def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ): class lowercase : """simple docstring""" def __init__( self : Tuple ): '''simple docstring''' _snake_case : List[str] = torch.ones([0] ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ): '''simple docstring''' self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : int = self.dummy_cond_unet _snake_case : str = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) _snake_case : Union[str, Any] = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Union[str, Any] = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[str] = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Union[str, Any] = output.images _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Any = 
sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0] _snake_case : Tuple = image[0, -3:, -3:, -1] _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : List[str] = self.dummy_cond_unet _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : int = self.dummy_vae _snake_case : List[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Any = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : str = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Optional[Any] = output.images _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , 
return_dict=lowerCamelCase_ , )[0] _snake_case : Dict = image[0, -3:, -3:, -1] _snake_case : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert isinstance(pipe.scheduler , lowerCamelCase_ ) assert pipe.safety_checker is None _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.dummy_cond_unet _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : Any = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 _snake_case : str = unet.half() _snake_case : Union[str, Any] = vae.half() _snake_case : Dict = bert.half() # make sure here that pndm scheduler skips prk _snake_case : List[str] = 
StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Tuple = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Optional[int] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) _snake_case : List[str] = 40_03_66_03_46 _snake_case : int = 7 # without safety guidance (sld_guidance_scale = 0) _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Union[str, Any] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : str = output.images _snake_case : Dict = image[0, -3:, 
-3:, -1] _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ ) _snake_case : int = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : Tuple = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity' _snake_case : Optional[Any] = 27_34_97_17_55 _snake_case : Union[str, Any] = 7 _snake_case : Dict = torch.manual_seed(lowerCamelCase_ ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Any = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 _snake_case 
: Optional[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' 
' leyendecker' ) _snake_case : Union[str, Any] = 10_44_35_52_34 _snake_case : Dict = 12 _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Optional[int] = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Optional[int] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
652
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import map: submodule name -> public names it exports.
# BUG FIX: `_import_structure` was passed to _LazyModule below but never
# defined in the obfuscated version (NameError at import time).
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
lowercase_ = _import_structure  # keep the obfuscated module-level name bound

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # No fast tokenizer ships for RoCBert in this chunk; nothing to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
    lowercase_ = _import_structure['modeling_roc_bert']

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUG FIX: the obfuscated version re-raised OptionalDependencyNotAvailable
        # here when tokenizers *was* available; there is nothing extra to import.
        pass
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
1
def solution(length: int = 50) -> int:
    """Project Euler 117: count tilings of a row of `length` unit cells using
    grey unit squares plus coloured tiles of length 2, 3 and 4.

    Dynamic programming: ways_number[i] is the number of tilings of a row of
    length i; each coloured tile contributes ways_number[remaining] for every
    legal (start, length) placement.

    :param length: row length (non-negative)
    :return: number of distinct tilings
    """
    # ways_number[i] starts at 1: the all-grey row.
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


# Backward-compatible alias for the previous (obfuscated) public name.
# BUG FIX: the __main__ block called `solution`, which was never defined.
A__ = solution


if __name__ == "__main__":
    print(f'{solution() = }')
652
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import map: submodule name -> public names it exports.
# BUG FIX: `_import_structure` was passed to _LazyModule below but never
# defined in the obfuscated version (NameError at import time).
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
lowercase_ = _import_structure  # keep the obfuscated module-level name bound

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
    lowercase_ = _import_structure['modeling_mega']

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
1
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class lowercase(a_):
    """Processor pairing a Speech2Text feature extractor (audio) with its
    tokenizer (text) behind a single `__call__` interface.

    NOTE(review): the base class `a_` is not defined in this chunk — presumably
    it is ProcessorMixin (imported above); confirm. The typing names used in
    annotations (Dict, Any, ...) are likewise not imported here.
    """

    # Class names ProcessorMixin uses to auto-load the two sub-processors.
    # NOTE(review): both attributes share the obfuscated name `_UpperCamelCase`,
    # so the second assignment overwrites the first — originally these were
    # presumably `feature_extractor_class` and `tokenizer_class`; confirm.
    _UpperCamelCase: Dict = "Speech2TextFeatureExtractor"
    _UpperCamelCase: List[Any] = "Speech2TextTokenizer"

    def __init__(self: Optional[int], lowerCamelCase_: Any, lowerCamelCase_: Tuple):
        """Initialise via the mixin and cache default processing state.

        NOTE(review): the duplicated parameter name `lowerCamelCase_` is a
        SyntaxError as written — the obfuscation collapsed two distinct
        parameter names (presumably feature_extractor and tokenizer).
        """
        super().__init__(lowerCamelCase_, lowerCamelCase_)
        # NOTE(review): results are bound to the throwaway local `_snake_case`;
        # originally these were presumably `self.current_processor` and
        # `self._in_target_context_manager` — confirm against upstream.
        _snake_case: Dict = self.feature_extractor
        _snake_case: Union[str, Any] = False

    def __call__(self: List[str], *lowerCamelCase_: Any, **lowerCamelCase_: Optional[Any]):
        """Dispatch processing of `audio` and/or `text` keyword inputs.

        NOTE(review): the body reads `kwargs`, `args`, `audio`, `text`,
        `inputs`, `encodings` — none of which are bound in this obfuscated
        version; the `_snake_case` locals were presumably those names.
        """
        if self._in_target_context_manager:
            # Inside `as_target_processor`: forward everything to the tokenizer.
            return self.current_processor(*lowerCamelCase_, **lowerCamelCase_)
        if "raw_speech" in kwargs:
            # Legacy keyword kept for backward compatibility.
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            _snake_case: List[str] = kwargs.pop('raw_speech')
        else:
            _snake_case: Union[str, Any] = kwargs.pop('audio', lowerCamelCase_)
        _snake_case: Tuple = kwargs.pop('sampling_rate', lowerCamelCase_)
        _snake_case: Optional[Any] = kwargs.pop('text', lowerCamelCase_)
        if len(lowerCamelCase_) > 0:
            # Positional form: first positional argument is the audio.
            _snake_case: Tuple = args[0]
            _snake_case: str = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            _snake_case: Dict = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_)
        if text is not None:
            _snake_case: Dict = self.tokenizer(lowerCamelCase_, **lowerCamelCase_)
        # Return audio features, text encodings, or both merged under 'labels'.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            _snake_case: Optional[int] = encodings['input_ids']
            return inputs

    def __UpperCAmelCase(self: Union[str, Any], *lowerCamelCase_: str, **lowerCamelCase_: Optional[int]):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_)

    def __UpperCAmelCase(self: List[str], *lowerCamelCase_: Optional[Any], **lowerCamelCase_: Tuple):
        """Forward to the tokenizer's `decode`.

        NOTE(review): same obfuscated method name as above — this definition
        shadows the previous one inside the class namespace.
        """
        return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_)

    @contextmanager
    def __UpperCAmelCase(self: Optional[int]):
        """Deprecated context manager that temporarily routes `__call__` to the
        tokenizer for label processing; restores the feature extractor on exit."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        # NOTE(review): these `_snake_case` locals were presumably
        # `self._in_target_context_manager` / `self.current_processor`.
        _snake_case: Dict = True
        _snake_case: Union[str, Any] = self.tokenizer
        yield
        _snake_case: Union[str, Any] = self.feature_extractor
        _snake_case: Any = False
652
import os

import pytest
from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

# BUG FIX: every test below was named `A__` (each definition shadowed the
# previous one and pytest collected none of them) and multi-parameter defs
# duplicated the name `__lowerCAmelCase` (a SyntaxError). Names restored to
# match the intact @pytest.mark.parametrize argument ids.
lowercase_ = pytest.mark.integration


@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    """Copying a dataset script into tmp_path leaves the script, no bytecode."""
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    """Same check for the (deprecated) metric inspector."""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    'path, config_name, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # Omitting the config name for a multi-config dataset must raise.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    'path, expected',
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config',
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
652
1
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowercase ( a_ ): """simple docstring""" _UpperCamelCase : Any = (DEISMultistepScheduler,) _UpperCamelCase : List[str] = (("num_inference_steps", 25),) def __UpperCAmelCase ( self : Union[str, Any] , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case : Tuple = { 'num_train_timesteps': 10_00, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, } config.update(**lowerCamelCase_ ) return config def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : Tuple ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ ) _snake_case : int = self.dummy_sample _snake_case : Optional[Any] = 0.1 * sample _snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _snake_case : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ ) _snake_case : List[Any] = scheduler_class(**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals _snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase_ ) _snake_case : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ ) new_scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals _snake_case : Any = dummy_past_residuals[: new_scheduler.config.solver_order] _snake_case , _snake_case : List[Any] = sample, sample for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ): _snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 
**lowerCamelCase_ ).prev_sample _snake_case : int = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' pass def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : str ): '''simple docstring''' _snake_case : str = dict(self.forward_default_kwargs ) _snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ ) _snake_case : str = self.dummy_sample _snake_case : str = 0.1 * sample _snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _snake_case : Optional[Any] = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**lowerCamelCase_ ) scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residuals (must be after setting timesteps) _snake_case : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase_ ) _snake_case : Any = scheduler_class.from_pretrained(lowerCamelCase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase_ ) # copy over dummy past residual (must be after setting timesteps) _snake_case : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order] _snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample _snake_case : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self : str , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' if scheduler is None: _snake_case : str = 
self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config(**lowerCamelCase_ ) _snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ ) _snake_case : Optional[Any] = self.scheduler_classes[0] _snake_case : str = self.get_scheduler_config(**lowerCamelCase_ ) _snake_case : int = scheduler_class(**lowerCamelCase_ ) _snake_case : str = 10 _snake_case : Tuple = self.dummy_model() _snake_case : int = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : Any = model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample return sample def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Any = dict(self.forward_default_kwargs ) _snake_case : List[str] = kwargs.pop('num_inference_steps' , lowerCamelCase_ ) for scheduler_class in self.scheduler_classes: _snake_case : Optional[int] = self.get_scheduler_config() _snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ ) _snake_case : str = self.dummy_sample _snake_case : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ): scheduler.set_timesteps(lowerCamelCase_ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ): _snake_case : Optional[int] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] _snake_case : List[str] = dummy_past_residuals[: scheduler.config.solver_order] _snake_case : Dict = scheduler.timesteps[5] _snake_case : Tuple = scheduler.timesteps[6] _snake_case : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample _snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ 
).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() ) _snake_case : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ ) _snake_case : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 _snake_case : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _snake_case : str = DPMSolverMultistepScheduler.from_config(scheduler.config ) _snake_case : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) _snake_case : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config ) _snake_case : Tuple = self.full_loop(scheduler=lowerCamelCase_ ) _snake_case : str = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase_ ) def __UpperCAmelCase ( self : int ): '''simple docstring''' self.check_over_configs(thresholding=lowerCamelCase_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type='deis' , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , ) def __UpperCAmelCase ( self : Dict ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase_ ) def __UpperCAmelCase ( self : Dict ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , ) _snake_case : List[str] = self.full_loop( solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , ) assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers" def __UpperCAmelCase ( self : Any ): '''simple docstring''' self.check_over_configs(lower_order_final=lowerCamelCase_ ) self.check_over_configs(lower_order_final=lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 ) def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : str = self.full_loop() _snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : int = self.full_loop(prediction_type='v_prediction' ) _snake_case : int = torch.mean(torch.abs(lowerCamelCase_ ) ) assert abs(result_mean.item() - 0.091 ) < 1e-3 def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : Optional[int] = self.scheduler_classes[0] _snake_case : Optional[Any] = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 ) _snake_case : Tuple = scheduler_class(**lowerCamelCase_ ) _snake_case : List[Any] = 10 _snake_case : Optional[int] = self.dummy_model() _snake_case : Any = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCamelCase_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample assert sample.dtype == torch.floataa
652
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pre-trained BERT architecture.
        pytorch_dump_path: Where to write the converted ``state_dict``.
    """
    # Initialise an (untrained) PyTorch model from the architecture config.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Copy the weights from the TF checkpoint into the PyTorch modules.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Persist only the state dict, not the full model object.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
652
1
def get_data(source_data: list) -> list:
    """Transpose row-oriented records into per-attribute columns of floats.

    ``source_data`` is a list of rows; the result is one list per column,
    with every value coerced to ``float``.
    """
    data_lists: list = []
    for row in source_data:
        for i, el in enumerate(row):
            # Grow the list of columns lazily so any row width is handled.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """Min-max normalise each column according to its weight.

    weight == 0: lower raw values score higher (1 - normalised value).
    weight == 1: higher raw values score higher (normalised value).
    Any other weight raises ``ValueError``.
    """
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    # Constant column: every value is "best" under weight 0.
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    # Constant column: no value stands out under weight 1.
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """Sum the per-column scores element-wise into one score per row."""
    final_scores: list = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    """Score each row by weighted min-max proximity and append the score in place.

    Returns ``source_data`` with the final score appended to every row.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
652
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True when ``number`` is prime (deterministic 6k±1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in ascending order, starting from 2, forever."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    # islice(..., nth - 1, nth) skips the first nth-1 primes and takes one.
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
652
1
import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Args:
        number: 1-based index into the Proth sequence.

    Raises:
        TypeError: if ``number`` is not an integer.
        ValueError: if ``number`` is less than 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks whose sizes double; this bound gives
        # enough blocks to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            # Each block adds ``increment`` values of the form
            # 2**(block+1) + previous Proth number.
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
652
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowercase_ : Dict = '''bert-base-cased''' lowercase_ : Any = '''google/pegasus-xsum''' lowercase_ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowercase_ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowercase_ : Any = '''patrickvonplaten/t5-tiny-random''' lowercase_ : List[Any] = '''sshleifer/bart-tiny-random''' lowercase_ : Dict = '''sshleifer/tiny-mbart''' lowercase_ : str = '''sshleifer/tiny-marian-en-de''' def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : str = '\n'.join(__lowerCAmelCase ) Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase ) def A__( __lowerCAmelCase ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.source''' ) , __lowerCAmelCase ) _dump_articles(os.path.join(__lowerCAmelCase , F'''{split}.target''' ) , __lowerCAmelCase ) return tmp_dir class lowercase ( a_ ): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in 
SUMMARIES ) _snake_case : Dict = 4 _snake_case : Any = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. _snake_case : int = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , ) _snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ): '''simple docstring''' _snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) _snake_case : Union[str, Any] = 4 _snake_case : Optional[int] = 
LegacySeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , ) _snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) _snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) _snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines() _snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ ) _snake_case : Tuple = {x.name for x in tmp_dir.iterdir()} _snake_case : Dict = {x.name for x in save_dir.iterdir()} _snake_case : str = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCamelCase_ ) < len(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return _snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 ) _snake_case : List[str] = 64 _snake_case : str = 
ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ ) _snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler] assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples _snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _snake_case : List[Any] = [] _snake_case : List[Any] = [] for batch in data_loader: _snake_case : Any = batch['input_ids'].shape _snake_case : str = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _snake_case : int = np.product(batch['input_ids'].shape ) num_src_per_batch.append(lowerCamelCase_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCamelCase_ ) assert num_src_per_batch[0] == max(lowerCamelCase_ ) if failures: raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' ) def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 ) _snake_case : Optional[Any] = 2 _snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ ) _snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ ) _snake_case : Tuple = tokenizer.pad_token_id def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ): return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) assert sum(count_pad_tokens(lowerCamelCase_ ) ) < 
sum(count_pad_tokens(lowerCamelCase_ ) ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ): '''simple docstring''' if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ): _snake_case : Dict = 'examples/seq2seq/wmt_en_ro' _snake_case : List[Any] = max_len * 2 * 64 if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists(): save_len_file(lowerCamelCase_ , lowerCamelCase_ ) else: _snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro' _snake_case : List[Any] = max_len * 4 save_len_file(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _snake_case : str = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , ) return ds, max_tokens, tokenizer def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case , _snake_case , _snake_case : Any = self._get_dataset() _snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) ) _snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) ) assert idsa.intersection(lowerCamelCase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ ) if tok_name == MBART_TINY: _snake_case : int = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) _snake_case : 
Optional[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _snake_case : Tuple = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) _snake_case : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
652
1
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowercase_ : str = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def A__( __lowerCAmelCase ): _snake_case : Union[str, Any] = test_results.split(' ' ) _snake_case : List[Any] = 0 _snake_case : str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. _snake_case : str = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(__lowerCAmelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A__( __lowerCAmelCase ): _snake_case : Optional[int] = {} _snake_case : Optional[int] = None _snake_case : Dict = False for line in failures_short_lines.split('\n' ): if re.search(R'_ \[doctest\]' , __lowerCAmelCase ): _snake_case : List[Any] = True _snake_case : List[Any] = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): _snake_case : Any = line _snake_case : Tuple = False return failures class lowercase : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict ): '''simple docstring''' _snake_case : int = title _snake_case : Dict = doc_test_results['time_spent'].split(',' )[0] _snake_case : Optional[int] = doc_test_results['success'] _snake_case : str = doc_test_results['failures'] _snake_case : Any = self.n_success + self.n_failures # Failures and success of the modeling tests _snake_case : List[str] = doc_test_results @property def __UpperCAmelCase ( self : str ): '''simple docstring''' _snake_case : Tuple = [self._time_spent] _snake_case : int = 0 for time in time_spent: _snake_case : Optional[int] = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time 
spent was less than a minute. if len(lowerCamelCase_ ) == 1: _snake_case : Any = [0, 0, time_parts[0]] _snake_case , _snake_case , _snake_case : Any = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds _snake_case , _snake_case , _snake_case : Union[str, Any] = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f'''{int(lowerCamelCase_ )}h{int(lowerCamelCase_ )}m{int(lowerCamelCase_ )}s''' @property def __UpperCAmelCase ( self : Dict ): '''simple docstring''' return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": ( f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' f''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Tuple = 40 _snake_case : Any = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(lowerCamelCase_ , lowerCamelCase_ )} _snake_case : Tuple = '' for category, failures in category_failures.items(): if len(lowerCamelCase_ ) == 0: continue if report != "": report += "\n\n" report += 
f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowerCamelCase_ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'''The following examples had failures:\n\n\n{report}\n''', }, } @property def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : int = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowerCamelCase_ ) @staticmethod def __UpperCAmelCase ( ): '''simple docstring''' _snake_case : Dict = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(lowerCamelCase_ )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=lowerCamelCase_ , ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) _snake_case : Union[str, Any] = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.' 
_snake_case : List[str] = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=lowerCamelCase_ , ) def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' _snake_case : str = '' for key, value in failures.items(): _snake_case : List[Any] = value[:2_00] + ' [Truncated]' if len(lowerCamelCase_ ) > 2_50 else value failures_text += f'''*{key}*\n_{value}_\n\n''' _snake_case : Any = job_name _snake_case : Optional[int] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: _snake_case : Union[str, Any] = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' 
) _snake_case : Any = self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) _snake_case : str = sorted(self.doc_test_results.items() , key=lambda lowerCamelCase_ : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): _snake_case : Optional[int] = f'''*Num failures* :{len(job_result['failed'] )} \n''' _snake_case : List[Any] = job_result['failures'] _snake_case : Optional[Any] = self.get_reply_blocks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , text=lowerCamelCase_ ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'''Results for {job}''' , blocks=lowerCamelCase_ , thread_ts=self.thread_ts['ts'] , ) time.sleep(1 ) def A__( ): _snake_case : Dict = os.environ['GITHUB_RUN_ID'] _snake_case : str = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' _snake_case : Any = requests.get(__lowerCAmelCase ).json() _snake_case : Optional[int] = {} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) _snake_case : Any = math.ceil((result['total_count'] - 1_00) / 1_00 ) for i in range(__lowerCAmelCase ): _snake_case : Union[str, Any] = requests.get(url + F'''&page={i + 2}''' ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.' 
, __lowerCAmelCase ) return {} def A__( __lowerCAmelCase ): _snake_case : str = {} if os.path.exists(__lowerCAmelCase ): _snake_case : Any = os.listdir(__lowerCAmelCase ) for file in files: try: with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , encoding='utf-8' ) as f: _snake_case : List[Any] = f.read() except UnicodeDecodeError as e: raise ValueError(F'''Could not open {os.path.join(__lowerCAmelCase , __lowerCAmelCase )}.''' ) from e return _artifact def A__( ): class lowercase : """simple docstring""" def __init__( self : str , lowerCamelCase_ : str ): '''simple docstring''' _snake_case : str = name _snake_case : int = [] def __str__( self : Any ): '''simple docstring''' return self.name def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' self.paths.append({'name': self.name, 'path': path} ) _snake_case : Dict[str, Artifact] = {} _snake_case : Dict = filter(os.path.isdir , os.listdir() ) for directory in directories: _snake_case : int = directory if artifact_name not in _available_artifacts: _snake_case : Optional[Any] = Artifact(__lowerCAmelCase ) _available_artifacts[artifact_name].add_path(__lowerCAmelCase ) return _available_artifacts if __name__ == "__main__": lowercase_ : Optional[int] = get_job_links() lowercase_ : int = retrieve_available_artifacts() lowercase_ : List[str] = collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowercase_ : List[str] = { v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job lowercase_ : Dict = github_actions_job_links.get('''run_doctests''') lowercase_ : Dict = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] lowercase_ : List[Any] = retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: 
lowercase_ , lowercase_ , lowercase_ : Dict = handle_test_results(artifact['''stats''']) lowercase_ : str = failed lowercase_ : Dict = success lowercase_ : Optional[Any] = time_spent[1:-1] + ''', ''' lowercase_ : int = extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): lowercase_ : Dict = line.replace('''FAILED ''', '''''') lowercase_ : Tuple = line.split()[0].replace('''\n''', '''''') if "::" in line: lowercase_ , lowercase_ : Dict = line.split('''::''') else: lowercase_ , lowercase_ : int = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowercase_ : Optional[Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) lowercase_ : int = all_failures[test] if test in all_failures else '''N/A''' lowercase_ : Tuple = failure break lowercase_ : int = Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
652
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem with the classic greedy strategy.

    Items are taken in decreasing value-per-weight order; the last item that
    does not fully fit is taken fractionally.

    Args:
        value: Item values.
        weight: Item weights (same length as ``value``).
        capacity: Knapsack capacity.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction of
        item ``i`` placed in the knapsack.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy order: best value-per-weight first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits whole.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that fits, then the knapsack is full.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A__( __lowerCAmelCase , __lowerCAmelCase ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Dict = flax_key_tuple[:-1] + ('weight',) _snake_case : str = torch.permute(__lowerCAmelCase , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ): # linear layer _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ('weight',) _snake_case : Optional[int] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Dict = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): if "metadata" in layer: _snake_case : Any = layer.split('metadata' ) _snake_case : Optional[int] = ''.join(split_layer[0] )[:-1] _snake_case : Union[str, Any] = [tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: _snake_case : Optional[Any] = layer.split('kvstore' ) _snake_case : List[Any] = ''.join(split_layer[0] )[:-1] _snake_case : Tuple = [tuple(('kvstore' + split_layer[1]).split('/' ) )] else: _snake_case : Tuple = layer.split('/' ) _snake_case : Any = '/'.join(split_layer[:-1] ) _snake_case : Dict = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Any = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: _snake_case : Union[str, Any] = 'file' else: _snake_case : List[str] = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def 
A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Any = rename_keys(__lowerCAmelCase ) _snake_case : Optional[Any] = {} for k, v in current_block.items(): _snake_case : int = v _snake_case : Tuple = new_current_block torch.save(__lowerCAmelCase , __lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = WEIGHTS_NAME ): _snake_case : Dict = convert_file_size_to_int(__lowerCAmelCase ) _snake_case : Dict = [] _snake_case : Dict = {} _snake_case : Optional[int] = 0 _snake_case : Dict = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: _snake_case : List[str] = serialization.msgpack_restore(fp.read() )['optimizer']['target'] _snake_case : Tuple = flatten_dict(__lowerCAmelCase , sep='/' ) _snake_case : Optional[int] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : Any = get_key_and_tensorstore_dict( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if curr_real_layer_name in all_layers: _snake_case : Tuple = content else: _snake_case : Dict = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Optional[int] = torch.tensor(__lowerCAmelCase ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Any = rename_base_flax_keys(tuple(key.split('/' ) ) , __lowerCAmelCase ) _snake_case : int = '/'.join(__lowerCAmelCase ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _snake_case : List[str] = os.path.join( __lowerCAmelCase , weights_name.replace('.bin' , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : int = {} _snake_case : int = 0 _snake_case : Union[str, Any] = raw_weights.to(getattr(__lowerCAmelCase , __lowerCAmelCase ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : Tuple = os.path.join(__lowerCAmelCase , weights_name.replace('.bin' , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__lowerCAmelCase ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : List[str] = {} _snake_case : List[Any] = {} for idx, shard in enumerate(__lowerCAmelCase ): _snake_case : List[Any] = weights_name.replace( '.bin' , F'''-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(__lowerCAmelCase , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) _snake_case : Any = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {'total_size': total_size} _snake_case : str = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , 'w' , encoding='utf-8' ) as f: _snake_case : Tuple = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + '\n' f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": lowercase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( 
'''--switch_t5x_checkpoint_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''') parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''', type=str, required=False, help='''Path to the output pytorch model.''', ) lowercase_ : List[str] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A__( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : Any = SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) _snake_case : Tuple = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) _snake_case : Tuple = TaTokenizer.from_pretrained('t5-small' ) _snake_case : int = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' _snake_case : List[str] = tokenizer(__lowerCAmelCase , return_tensors='pt' ).input_ids _snake_case : Dict = model.generate(__lowerCAmelCase , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
652
# Lazy-import initializer for the CANINE model family.
#
# Defects fixed relative to the previous revision:
#   * the torch-gated model symbols were assigned to a throwaway module variable
#     instead of being registered under ``_import_structure["modeling_canine"]``
#   * the import-structure dict was bound to a name ``_LazyModule`` never reads
#   * the lazy module proxy was never installed in ``sys.modules``
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> list of public symbols; consumed by _LazyModule below.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code requires torch; silently omit it when torch is absent.
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    # ...while at runtime the module object is replaced by a lazy proxy that
    # imports submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
1
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Return True if code point ``cp`` lies in a CJK ideograph Unicode block."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of ``word`` is a CJK ideograph, else 0."""
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect the multi-character, all-Chinese tokens from ``tokens``."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    return list(word_set)


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Mark non-initial characters of whole words in ``chinese_word_set`` with '##'.

    Greedy longest-match from each position; mutates and returns the token list.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate first so greedy matching is maximal.
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Compute whole-word-masking reference positions for each input line.

    Returns one list per line holding the indices of BERT subword tokens that
    are non-initial characters of an LTP-segmented Chinese word.
    """
    ltp_res = []
    # Batch by 100 lines to bound memory while running LTP word segmentation.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
652
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test exploiting that primes > 3 are 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining candidate divisors have the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Raises:
        TypeError: if ``nth`` is not castable to int.
        ValueError: if ``nth`` is not >= 1.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
652
1
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Logical left shift: append ``shift_amount`` zero bits.

    Returns the result as a '0b'-prefixed binary string.
    Raises ValueError on negative inputs.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Logical right shift: drop the ``shift_amount`` lowest bits, filling with 0.

    Returns the result as a '0b'-prefixed binary string.
    Raises ValueError on negative inputs.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        # Every significant bit shifted out.
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Arithmetic right shift: drop low bits, replicating the sign bit on the left.

    Negative numbers are represented in two's complement; the result is a
    '0b'-prefixed binary string.
    """
    if number >= 0:
        # Get binary representation of positive number: prepend an explicit 0 sign bit.
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:
        # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # bit length without '-0b'
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        # Fully shifted out: result is all sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import torch
from transformers import AutoModel


class lowercase(torch.nn.Module):
    """Few-shot NER (FSNER) span scorer.

    Encodes query and support sentences with a shared BERT encoder and scores
    every query token against the entity-boundary tokens of the supports,
    yielding per-token start/end probability distributions.

    Defects fixed relative to the previous revision: all four methods shared a
    single obfuscated name (so only the last definition survived while
    ``forward`` calls ``self.BERT``), the encoder outputs and module attributes
    were assigned to throwaway locals, and ``return_dict``/``keepdim`` were
    passed undefined values instead of ``True``.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        # Shared encoder for both query and support sentences.
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs; return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum embeddings along dim 2, keeping the dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score query tokens against support entity-boundary tokens.

        Args:
            W_query: tokenized query sentences (mapping of tensors).
            W_supports: tokenized support sentences; must also carry ``sizes``
                (number of supports per query), ``start_token_id`` and
                ``end_token_id`` entries, which are popped here before encoding.

        Returns:
            Tuple ``(p_starts, p_ends)`` of stacked per-token probabilities.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # Strip bookkeeping entries so the rest can be fed straight into BERT.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # Offset of this query's support block within the flat support batch.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
652
1
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
652
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class lowercase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style token encoder: embeddings + fixed positional table + T5 blocks.

    Defects fixed relative to the previous revision: the base classes were the
    undefined placeholder ``a_`` (the three mixins were imported but unused),
    ``__init__`` assigned submodules to throwaway locals although ``forward``
    reads ``self.token_embedder`` etc., both methods had duplicate parameter
    names (a SyntaxError), and the forward method was not named ``forward``.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Positional table is a frozen (non-trainable) embedding.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode a batch of token ids.

        Args:
            encoder_input_tokens: integer tensor of token ids; position ids are
                derived from its second axis.
            encoder_inputs_mask: attention mask over the same tokens.

        Returns:
            Tuple of (encoded hidden states, the unchanged input mask).
        """
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
652
1
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in ``qs`` match any window of strings in ``ks``."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a (key, value) -> replacement function from (pattern, spec) pairs."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules for GPT-style parameters over a 'mp' model-parallel axis."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of ``in_dict``; fail if any is missed."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel column title (e.g. "AB") to its 1-based column number (28).

    The title is treated as a base-26 numeral where 'A' = 1 ... 'Z' = 26.
    Requires an all-uppercase title.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # ord(letter) - 64 maps 'A'..'Z' to 1..26.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


# Backward-compatible alias for the previous public name.
A__ = excel_title_to_column


if __name__ == "__main__":
    from doctest import testmod

    testmod()
652
1
def nand_gate(input_a: int, input_b: int) -> int:
    """NAND logic gate: returns 0 only when both inputs are 1.

    The previous revision had duplicate parameter names (a SyntaxError) and
    compared the first input against itself, leaving the second input dead.
    """
    return int((input_a, input_b).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
652
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


# Defects fixed relative to the previous revision: the three module-level
# strings were all bound to one variable (shadowing each other) while the
# decorator below references _DESCRIPTION/_KWARGS_DESCRIPTION, and both
# methods shared one obfuscated name, so datasets.Metric received neither
# `_info` nor `_compute`.
_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """TER (Translation Edit Rate) metric, computed via sacrebleu."""

    def _info(self):
        # sacrebleu's TER API is only usable from 1.4.12 onward.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # Every prediction needs the same number of references so the nested
        # lists can be transposed into sacrebleu's expected layout.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
652
1
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

# NOTE(review): the mangled identifier `GPTaLMHeadModel` does not exist in
# transformers; the real export is `GPT2LMHeadModel`.
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` to `dirpath`, clearing any stale checkpoint files first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a distribution over the last dimension.

    If `unlogit` is True, `p` is squared first (attention maps are treated
    as unnormalized weights). Zero entries contribute zero (0 * log 0 := 0).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # avoid NaN from 0 * log(0)
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor row by row (floats with 5 decimals, ints as-is)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores.

    Importance follows Michel et al. (http://arxiv.org/abs/1905.10650): the
    absolute gradient of the loss w.r.t. a per-head mask of ones.

    Returns:
        (attn_entropy, head_importance, total_loss) — two (layers, heads)
        tensors and the accumulated LM loss over the dataloader.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads while the (1/loss) score
    stays above `masking_threshold` of the original score.

    Returns the final (layers, heads) binary head mask.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the weights of heads masked out in `head_mask`, then
    compare score and timing before/after pruning and save the pruned model.
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses single-head layers to a scalar; re-wrap in a list
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Entry point: parse args, load GPT-2, compute head importance, then
    optionally mask and prune heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
652
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
# Marks a flattened-parameter key that no partition rule matched yet.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in `qs` match a contiguous window of `ks`."""
    # compile regexes and force complete match on each key component
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a (key, value) -> partition-spec lookup from `rules`.

    The returned callable gives the replacement of the first matching rule,
    or the original value if no rule matches.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules: shard feed-forward/head dims on 'mp', replicate the rest."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Map every parameter in `in_dict` to its PartitionSpec.

    Raises AssertionError if any parameter is not covered by a rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
1
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocab pieces; unknown chars -> unk."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the window from the right until a vocab entry is found
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """
    Construct a CPM-Ant tokenizer (jieba pre-segmentation + wordpiece).

    Args:
        vocab_file (`str`): Path to the vocabulary file.
        bod_token (`str`): Beginning-of-document token. Defaults to `"<d>"`.
        eod_token (`str`): End-of-document token. Defaults to `"</d>"`.
        bos_token / eos_token / pad_token / unk_token: standard special tokens.
        line_token (`str`): Token standing for a newline. Defaults to `"</n>"`.
        space_token (`str`): Token standing for a space. Defaults to `"</_>"`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map literal space/newline characters onto their placeholder tokens.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation, then wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids to a string, dropping negative ids and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary (with space/newline restored to their
        placeholder tokens) to `save_directory` and return the file path."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prefix each sequence with the BOS token."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
652
# NOTE(review): this whole script carries machine-obfuscated identifiers
# (`A__`, `_snake_case`, `__lowerCAmelCase`): assignments bind throwaway names
# while later statements read the original names (`model`, `plogp`, `args`, ...),
# and tuple targets carry annotations (`_snake_case , _snake_case : List[str] = ...`),
# which is not valid Python. Code is preserved verbatim; comments only.
# Purpose (from structure): compute per-head attention entropy / importance for a
# GPT-2 LM, then optionally mask and prune the least important attention heads.
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel

lowercase_ : Any = logging.getLogger(__name__)


# save_model: remove any stale config/weights in the target dir, then save.
# NOTE(review): `model` is read but never bound here — obfuscation artifact.
def A__( __lowerCAmelCase , __lowerCAmelCase ):
    # save results
    if os.path.exists(__lowerCAmelCase ):
        if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
            os.path.join(__lowerCAmelCase , 'config.json' ) ):
            os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
        if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
    else:
        os.makedirs(__lowerCAmelCase )
    model.save_pretrained(__lowerCAmelCase )


# entropy: -sum(p * log p) over the last dim; `unlogit` squares first.
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
    _snake_case : int = 2
    if unlogit:
        _snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
    _snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
    # zero out NaN/-inf contributions from p == 0 (presumably — verify upstream)
    _snake_case : List[str] = 0
    return -plogp.sum(dim=-1 )


# print_2d_tensor: log a (layers x heads) tensor, one row per layer.
def A__( __lowerCAmelCase ):
    logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
    for row in range(len(__lowerCAmelCase ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )


# compute_heads_importance: one pass over the eval data accumulating, per head,
# attention entropy and |d loss / d head_mask| as an importance score.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
    _snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
    _snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
    _snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
    if head_mask is None:
        _snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
        # gradients w.r.t. the mask are the importance signal
        head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        _snake_case : List[str] = None
    _snake_case : str = 0.0
    _snake_case : List[str] = 0.0
    for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        _snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
        ((_snake_case) , ) : int = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        _snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        _snake_case , _snake_case , _snake_case : Dict = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__lowerCAmelCase ):
                _snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        _snake_case : List[str] = 2
        _snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        # min-max rescale to [0, 1]
        _snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(__lowerCAmelCase )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(__lowerCAmelCase )
    logger.info('Head ranked by importance scores' )
    _snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    _snake_case : Optional[Any] = torch.arange(
        head_importance.numel() , device=args.device )
    _snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
    print_ad_tensor(__lowerCAmelCase )
    return attn_entropy, head_importance, total_loss


# mask_heads: greedily zero the least-important heads until the 1/loss score
# falls below masking_threshold * original score; saves the final mask to disk.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
    _snake_case : List[str] = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
    _snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
    _snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    _snake_case : Dict = original_score
    while current_score >= original_score * args.masking_threshold:
        _snake_case : List[str] = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        _snake_case : Optional[Any] = float('Inf' )
        _snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
        if len(__lowerCAmelCase ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        _snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        _snake_case : Tuple = new_head_mask.view(-1 )
        _snake_case : List[str] = 0.0
        _snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
        _snake_case : Optional[Any] = new_head_mask.clone().detach()
        print_ad_tensor(__lowerCAmelCase )
        # Compute metric and head importance again
        _snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
        _snake_case : Union[str, Any] = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
    logger.info('Final head mask' )
    print_ad_tensor(__lowerCAmelCase )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask


# prune_heads: physically remove the masked heads, re-score, and compare
# parameter count and wall-clock time before/after pruning.
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _snake_case : List[Any] = datetime.now()
    _snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
    _snake_case : str = 1 / loss
    _snake_case : Optional[int] = datetime.now() - before_time
    _snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
    # map layer -> list of head indices whose mask entry is 0
    _snake_case : Optional[Any] = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            # single pruned head comes back as a scalar; normalize to a list
            _snake_case : Optional[int] = [
                v,
            ]
    assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__lowerCAmelCase )
    _snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
    _snake_case : List[str] = datetime.now()
    _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
    _snake_case : Dict = 1 / loss
    _snake_case : str = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
    save_model(__lowerCAmelCase , args.output_dir )


# main: CLI entry point — parse args, set up (optionally distributed) devices,
# load the LM, build the dataset, then score / mask / prune heads.
def A__( ):
    _snake_case : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
    parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
    parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
    _snake_case : List[str] = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        _snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        _snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        _snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
        _snake_case : Dict = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    _snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        _snake_case : List[str] = nn.parallel.DistributedDataParallel(
            __lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
    elif args.n_gpu > 1:
        _snake_case : int = nn.DataParallel(__lowerCAmelCase )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
    torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
    # Prepare dataset
    # NOTE(review): `np.intaa` is a mangled dtype name (presumably np.int32/int64) — verify upstream
    _snake_case : str = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    _snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
    _snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
    _snake_case : List[str] = RandomSampler(__lowerCAmelCase )
    _snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        _snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    main()
652
1
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or its derivative expressed in terms of the
    sigmoid *output* when ``deriv=True`` (i.e. value * (1 - value)).

    Fix: the auto-mangled source defined this as ``A__(__lowerCAmelCase, ...)``
    while the body read ``value`` and ``deriv`` — a guaranteed NameError.  The
    parameter names are restored to the ones the body actually uses.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value (learning-rate-like constant; the body below reads this name)
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single scalar weight by gradient descent so that the sigmoid
    output approaches ``expected / 100``; return the final output scaled to
    the 0-100 range.

    The starting weight is random (random.randint), so results vary per call
    unless the caller seeds the ``random`` module.

    Fix: restores the names (``forward_propagation``, ``sigmoid_function``,
    ``INITIAL_VALUE``, ``weight``) that the mangled source referenced but
    never defined.
    """
    # Random starting weight in [1, 199] (always odd) — as in the original.
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    # NOTE(review): with number_propagations == 0, layer_1 is unbound — the
    # original had the same behavior; preserved deliberately.
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
652
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit of *num*.

    The sign is discarded (``abs``) before the digits are considered.

    Fix: the auto-mangled source assigned the candidate list to a throwaway
    name and then read ``num_transpositions`` — a guaranteed NameError.  The
    names the body actually uses are restored; behavior is otherwise identical.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    >>> remove_digit(2222222)
    222222

    Raises:
        TypeError: if *num* is not an int.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(num))
    # One candidate per digit position, each with that position removed.
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    # NOTE(review): a single-digit input yields int("") -> ValueError, exactly
    # as the original code would have (had its names resolved).
    return max(int("".join(list(transposition))) for transposition in num_transpositions)


# Backward-compatible alias for the original auto-generated public name.
A__ = remove_digit

if __name__ == "__main__":
    __import__("doctest").testmod()
652
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# NOTE(review): identifiers in this file are machine-obfuscated — both classes
# are named `lowercase`, both dataclass fields are `_UpperCamelCase`, and method
# bodies read names (`sigma_max`, `sample`, `timesteps`, ...) that the mangled
# parameter names (`lowerCamelCase_`) never bind. Code is preserved verbatim;
# only comments/docstrings are added.
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class lowercase ( a_ ):
    """Output container for the VE-SDE predictor step (presumably SdeVeOutput:
    the noisy previous sample plus its pre-noise mean) — verify upstream."""

    # previous (denoised-one-step) sample, including the stochastic term
    _UpperCamelCase : torch.FloatTensor
    # mean of the previous sample before noise was added
    _UpperCamelCase : torch.FloatTensor


class lowercase ( a_ , a_ ):
    """Variance-exploding (VE) SDE scheduler in the style of the score_sde
    reference above: a predictor step (`step_pred`-like method) plus a
    Langevin corrector step, over a geometric sigma schedule."""

    # solver order (per the scheduler convention)
    _UpperCamelCase : Union[str, Any] = 1

    @register_to_config
    def __init__( self : Union[str, Any] , lowerCamelCase_ : int = 20_00 , lowerCamelCase_ : float = 0.15 , lowerCamelCase_ : float = 0.01 , lowerCamelCase_ : float = 1348.0 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : int = 1 , ):
        '''Store sigma_max, clear timesteps, and build the sigma schedule.
        Defaults appear to be: num_train_timesteps=2000, snr=0.15,
        sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1
        — TODO confirm the parameter meanings against upstream.'''
        # standard deviation of the initial noise distribution
        _snake_case : Dict = sigma_max
        # setable values
        _snake_case : Union[str, Any] = None
        self.set_sigmas(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )

    def __UpperCAmelCase ( self : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[int] = None ):
        '''Identity scaling: VE-SDE schedulers do not rescale the model input.'''
        return sample

    def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : float = None , lowerCamelCase_ : Union[str, torch.device] = None ):
        '''Create the continuous timestep grid, linearly spaced from 1 down to
        sampling_eps.'''
        _snake_case : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _snake_case : Optional[int] = torch.linspace(1 , lowerCamelCase_ , lowerCamelCase_ , device=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : float = None , lowerCamelCase_ : float = None , lowerCamelCase_ : float = None ):
        '''Build the geometric sigma schedule sigma_min * (sigma_max/sigma_min)^t
        for the current timesteps (creating timesteps first if unset).'''
        _snake_case : Dict = sigma_min if sigma_min is not None else self.config.sigma_min
        _snake_case : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
        _snake_case : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # log-spaced discrete sigmas between sigma_min and sigma_max
        _snake_case : Tuple = torch.exp(torch.linspace(math.log(lowerCamelCase_ ) , math.log(lowerCamelCase_ ) , lowerCamelCase_ ) )
        _snake_case : List[Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
        '''Return sigma at the previous index (zero for the first timestep).'''
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : bool = True , ):
        '''Predictor step: one reverse-SDE update (drift + diffusion * noise).
        Returns SdeVeOutput, or a (prev_sample, prev_sample_mean) tuple when
        return_dict is falsy.'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        _snake_case : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        _snake_case : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _snake_case : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _snake_case : Tuple = self.discrete_sigmas[timesteps].to(sample.device )
        _snake_case : Optional[Any] = self.get_adjacent_sigma(lowerCamelCase_ , lowerCamelCase_ ).to(sample.device )
        _snake_case : List[str] = torch.zeros_like(lowerCamelCase_ )
        _snake_case : str = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _snake_case : Optional[int] = diffusion.flatten()
        # broadcast diffusion to the sample's rank
        while len(diffusion.shape ) < len(sample.shape ):
            _snake_case : List[Any] = diffusion.unsqueeze(-1 )
        _snake_case : Tuple = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _snake_case : Optional[int] = randn_tensor(
            sample.shape , layout=sample.layout , generator=lowerCamelCase_ , device=sample.device , dtype=sample.dtype )
        _snake_case : Tuple = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _snake_case : Union[str, Any] = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCamelCase_ , prev_sample_mean=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : bool = True , ):
        '''Corrector (Langevin MCMC) step: nudge the sample along the score with
        a step size derived from the configured signal-to-noise ratio.'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _snake_case : Optional[Any] = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCamelCase_ ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _snake_case : Optional[int] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _snake_case : Any = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _snake_case : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _snake_case : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _snake_case : Tuple = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _snake_case : Dict = step_size.unsqueeze(-1 )
        _snake_case : Dict = sample + step_size * model_output
        _snake_case : Optional[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : torch.FloatTensor , ):
        '''Forward diffusion: add sigma-scaled noise to original_samples
        (generating fresh noise when none is supplied).'''
        _snake_case : Union[str, Any] = timesteps.to(original_samples.device )
        _snake_case : Optional[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _snake_case : Dict = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCamelCase_ ) * sigmas[:, None, None, None]
        )
        _snake_case : int = noise + original_samples
        return noisy_samples

    def __len__( self : Dict ):
        '''Number of training timesteps, as configured.'''
        return self.config.num_train_timesteps
652
# NOTE(review): identifiers here are machine-obfuscated — all three task
# classes are named `lowercase`, the base class is the unresolved `a_`, and
# method bodies read names (`mode`, `words`, `labels`, `path`, ...) that the
# mangled parameters (`lowerCamelCase_`) never bind. Code preserved verbatim;
# comments/docstrings only. From structure these are TokenClassificationTask
# implementations for NER (CoNLL), chunking, and POS (CoNLL-U) data.
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask

lowercase_ : Tuple = logging.getLogger(__name__)


class lowercase ( a_ ):
    """NER task over CoNLL-style column files; `label_idx` selects which
    whitespace-separated column carries the tag (default: last)."""

    def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
        '''Remember which token column holds the label.'''
        # in NER datasets, the last column is usually reserved for NER label
        _snake_case : str = label_idx

    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` and group lines into InputExample sentences,
        splitting on blank lines and -DOCSTART- markers.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : Union[str, Any] = mode.value
        _snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Dict = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            _snake_case : List[Any] = []
            _snake_case : int = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                        guid_index += 1
                        _snake_case : Optional[Any] = []
                        _snake_case : Union[str, Any] = []
                else:
                    _snake_case : Tuple = line.split(' ' )
                    words.append(splits[0] )
                    if len(lowerCamelCase_ ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            # flush the trailing sentence (no terminating blank line)
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
        return examples

    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Echo the test input, appending one prediction per token line.'''
        _snake_case : str = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(lowerCamelCase_ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                _snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(lowerCamelCase_ )
            else:
                # more tokens in the sentence than predictions (truncated input)
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
        '''Load labels from a file (ensuring "O" is present) or fall back to
        the standard CoNLL-2003 NER tag set.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : Optional[int] = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Optional[int] = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class lowercase ( a_ ):
    """Chunking task: same file format as NER but the tag lives in the
    second-to-last column; default labels are the CoNLL-2000 chunk tags."""

    def __init__( self : Optional[int] ):
        '''Chunk labels sit one column before the NER column.'''
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        '''Load labels from a file (ensuring "O" is present) or fall back to
        the built-in chunk tag set.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : str = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Union[str, Any] = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class lowercase ( a_ ):
    """POS tagging task over CoNLL-U files parsed with `conllu.parse_incr`;
    labels are Universal POS tags."""

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` as CoNLL-U and emit one InputExample per sentence
        using the `form` and `upos` fields.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : str = mode.value
        _snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Tuple = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            for sentence in parse_incr(lowerCamelCase_ ):
                _snake_case : List[str] = []
                _snake_case : str = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write each sentence as "form (upos|prediction)" tuples.'''
        _snake_case : Dict = 0
        for sentence in parse_incr(lowerCamelCase_ ):
            _snake_case : Optional[int] = preds_list[example_id]
            _snake_case : List[Any] = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(lowerCamelCase_ )
            example_id += 1

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
        '''Load labels from a file or fall back to the 17 Universal POS tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
652
1
# Lazy-import module for the data2vec model family (audio / text / vision).
# NOTE(review): obfuscation broke this module — every assignment below binds a
# throwaway `lowercase_` name, so the configuration dict and the model-class
# lists are never merged into any import structure, and `_LazyModule(...)` at
# the bottom references an undefined `_import_structure`. Code preserved
# verbatim; verify against the upstream transformers data2vec __init__.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# configuration submodules -> exported names (presumably `_import_structure`)
lowercase_ : str = {
    '''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
    '''configuration_data2vec_text''': [
        '''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Data2VecTextConfig''',
        '''Data2VecTextOnnxConfig''',
    ],
    '''configuration_data2vec_vision''': [
        '''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Data2VecVisionConfig''',
        '''Data2VecVisionOnnxConfig''',
    ],
}

# PyTorch model classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = [
        '''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Data2VecAudioForAudioFrameClassification''',
        '''Data2VecAudioForCTC''',
        '''Data2VecAudioForSequenceClassification''',
        '''Data2VecAudioForXVector''',
        '''Data2VecAudioModel''',
        '''Data2VecAudioPreTrainedModel''',
    ]
    lowercase_ : Tuple = [
        '''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Data2VecTextForCausalLM''',
        '''Data2VecTextForMaskedLM''',
        '''Data2VecTextForMultipleChoice''',
        '''Data2VecTextForQuestionAnswering''',
        '''Data2VecTextForSequenceClassification''',
        '''Data2VecTextForTokenClassification''',
        '''Data2VecTextModel''',
        '''Data2VecTextPreTrainedModel''',
    ]
    lowercase_ : List[Any] = [
        '''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Data2VecVisionForImageClassification''',
        '''Data2VecVisionForMaskedImageModeling''',
        '''Data2VecVisionForSemanticSegmentation''',
        '''Data2VecVisionModel''',
        '''Data2VecVisionPreTrainedModel''',
    ]

# TensorFlow vision classes, only when TF is installed.
if is_tf_available():
    lowercase_ : List[str] = [
        '''TFData2VecVisionForImageClassification''',
        '''TFData2VecVisionForSemanticSegmentation''',
        '''TFData2VecVisionModel''',
        '''TFData2VecVisionPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    # at runtime, replace this module with a lazy loader
    lowercase_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : Tuple = 1 _snake_case : str = 3 _snake_case : List[str] = (32, 32) _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ ) return image @property def __UpperCAmelCase ( self : str ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCAmelCase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , 
num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(lowerCamelCase_ ) @property def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ): class lowercase : """simple docstring""" def __init__( self : Tuple ): '''simple docstring''' _snake_case : List[str] = torch.ones([0] ) def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ): '''simple docstring''' self.pixel_values.to(lowerCamelCase_ ) return self return Out() return extract def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : int = self.dummy_cond_unet _snake_case : str = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) _snake_case : Union[str, Any] = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Union[str, Any] = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : str = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[str] = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Union[str, Any] = output.images _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Any = 
sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0] _snake_case : Tuple = image[0, -3:, -3:, -1] _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : List[str] = self.dummy_cond_unet _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : int = self.dummy_vae _snake_case : List[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _snake_case : Any = StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : str = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _snake_case : Optional[Any] = output.images _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , 
return_dict=lowerCamelCase_ , )[0] _snake_case : Dict = image[0, -3:, -3:, -1] _snake_case : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ ) assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert isinstance(pipe.scheduler , lowerCamelCase_ ) assert pipe.safety_checker is None _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = self.dummy_cond_unet _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _snake_case : Any = self.dummy_vae _snake_case : Optional[Any] = self.dummy_text_encoder _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 _snake_case : str = unet.half() _snake_case : Union[str, Any] = vae.half() _snake_case : Dict = bert.half() # make sure here that pndm scheduler skips prk _snake_case : List[str] = 
StableDiffusionPipeline( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , ) _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Tuple = 'A painting of a squirrel eating a burger' _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Optional[int] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) _snake_case : List[str] = 40_03_66_03_46 _snake_case : int = 7 # without safety guidance (sld_guidance_scale = 0) _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Union[str, Any] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : str = output.images _snake_case : Dict = image[0, -3:, 
-3:, -1] _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ ) _snake_case : int = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : Tuple = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ ) _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _snake_case : Any = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity' _snake_case : Optional[Any] = 27_34_97_17_55 _snake_case : Union[str, Any] = 7 _snake_case : Dict = torch.manual_seed(lowerCamelCase_ ) _snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Any = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 _snake_case 
: Optional[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : List[Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' 
' leyendecker' ) _snake_case : Union[str, Any] = 10_44_35_52_34 _snake_case : Dict = 12 _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Any = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) _snake_case : Optional[int] = output.images _snake_case : int = image[0, -3:, -3:, -1] _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ ) _snake_case : Optional[int] = sd_pipe( [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _snake_case : str = output.images _snake_case : List[str] = image[0, -3:, -3:, -1] _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
652
1
"""Character-level (Unicode codepoint) tokenizer in the CANINE style."""
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

lowercase_ = logging.get_logger(__name__)

# Model max input sizes (CANINE was trained with sequences of 2048 codepoints).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES: Dict[str, int] = {"nielsr/canine-s": 2048}

# Unicode defines 1,114,112 total "codepoints".
UNICODE_VOCAB_SIZE = 1114112

# Canonical codepoints for special pseudo-characters.  The non-zero values sit
# in a Unicode "Private Use" area, so they can never collide with real text.
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class lowercase(a_):
    """Tokenizer whose vocabulary is the whole Unicode codepoint space.

    Each character is its own token (id == ord(char)); special tokens live in
    the Private Use Area constants defined above.
    """

    _UpperCamelCase: Dict[str, int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        # Every Unicode codepoint is a valid token id.
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        # Character-level tokenization: one token per codepoint.
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] — or, for a pair, [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """1 marks a special token, 0 marks a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # There is no vocabulary file to save: the vocab is all of Unicode.
        return ()
652
import functools def A__( __lowerCAmelCase , __lowerCAmelCase ): # Validation if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(__lowerCAmelCase ) == 0: return 0 if min(__lowerCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(__lowerCAmelCase ) >= 3_66: raise ValueError('All days elements should be less than 366' ) _snake_case : Optional[int] = set(__lowerCAmelCase ) @functools.cache def dynamic_programming(__lowerCAmelCase ) -> int: if index > 3_65: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
652
1
import comet # From: unbabel-comet import torch import datasets lowercase_ : str = datasets.logging.get_logger(__name__) lowercase_ : Optional[Any] = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowercase_ : Union[str, Any] = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowercase_ : str = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. 
Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'sources': datasets.Value('string' , id='sequence' ), 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[ 'https://github.com/Unbabel/COMET', 'https://www.aclweb.org/anthology/2020.emnlp-main.213/', 'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6', ] , ) def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' if self.config_name == "default": _snake_case : int = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) ) else: _snake_case : Optional[int] = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple=None , 
lowerCamelCase_ : Dict=False ): '''simple docstring''' if gpus is None: _snake_case : str = 1 if torch.cuda.is_available() else 0 _snake_case : Tuple = {'src': sources, 'mt': predictions, 'ref': references} _snake_case : List[str] = [dict(zip(lowerCamelCase_ , lowerCamelCase_ ) ) for t in zip(*data.values() )] _snake_case , _snake_case : str = self.scorer.predict(lowerCamelCase_ , gpus=lowerCamelCase_ , progress_bar=lowerCamelCase_ ) return {"mean_score": mean_score, "scores": scores}
652
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase_ : str = logging.get_logger(__name__) class lowercase ( a_ ): """simple docstring""" def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ): '''simple docstring''' warnings.warn( 'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
652
1
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): # Initialise PyTorch model _snake_case : Optional[int] = BertConfig.from_json_file(__lowerCAmelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) _snake_case : List[str] = BertForPreTraining(__lowerCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __lowerCAmelCase ) if __name__ == "__main__": lowercase_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowercase_ : List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
652
from math import factorial def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): if successes > trials: raise ValueError('successes must be lower or equal to trials' ) if trials < 0 or successes < 0: raise ValueError('the function is defined for non-negative integers' ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError('the function is defined for non-negative integers' ) if not 0 < prob < 1: raise ValueError('prob has to be in range of 1 - 0' ) _snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! _snake_case : List[Any] = float(factorial(__lowerCAmelCase ) ) coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
652
1
from math import pi, sqrt def A__( __lowerCAmelCase ): if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(__lowerCAmelCase ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(__lowerCAmelCase ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def A__( ): assert gamma(0.5 ) == sqrt(__lowerCAmelCase ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() lowercase_ : Any = 1.0 while num: lowercase_ : Union[str, Any] = float(input('''Gamma of: ''')) print(F'''gamma({num}) = {gamma(num)}''') print('''\nEnter 0 to exit...''')
652
lowercase_ : Tuple = ''' # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git ''' lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] lowercase_ : str = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
652
1
from math import log from scipy.constants import Boltzmann, physical_constants lowercase_ : str = 300 # TEMPERATURE (unit = K) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): if donor_conc <= 0: raise ValueError('Donor concentration should be positive' ) elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive' ) elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive' ) elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration' ) elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration' ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
652
"""Lazy-import ``__init__`` for the RoCBert model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the fast-tokenizer registration appears to have been stripped
    # from this branch; nothing extra is exported when `tokenizers` is installed.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): fast-tokenizer type-checking import was stripped as well.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
1
"""Lazy-import ``__init__`` for the speech-encoder-decoder model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
"""Lazy-import ``__init__`` for the MEGA model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
1
"""Unit tests for generation stopping criteria (max length / new tokens / time)."""
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        # Fabricate a (batch, length) batch of random token ids plus uniform scores.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        # start_length + max_new_tokens should surface as an effective max_length.
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the start timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # A mismatched max_length should warn rather than fail.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
652
"""Integration tests for ``datasets`` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Module-level mark: pytest only honours this exact variable name.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    'path, config_name, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    'path, expected',
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config',
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
652
1
"""DDIM image-to-image pipeline: noises an input image to an intermediate timestep,
then denoises it back with DDIM."""
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


# Shared input transform: resize to the model resolution and map pixels to [-1, 1].
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Normalize `image` (tensor, PIL image, or list of PIL images) to a batched tensor."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image


class lowercase(DiffusionPipeline):
    """DDIM-based noise/denoise round-trip pipeline over an input image."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that `strength` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''')

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the scheduler's timesteps reachable at `strength`."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move `image` onto `device`/`dtype` and add scheduler noise at `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}'''
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.'''
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Noise `image` up to `strength` of the schedule, denoise it back, and return
        the reconstruction together with the starting timestep."""
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
652
"""CLI: convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` dump."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BERT model from `bert_config_file`, load the TF weights, save as PyTorch."""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
652
1
def A__(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b`.

    An abbreviation is obtained by uppercasing some lowercase letters of `a`
    and deleting all remaining lowercase letters; uppercase letters of `a`
    must be matched as-is. Solved with a boolean DP where dp[i][j] means
    a[:i] can produce b[:j].
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime (trial division over 6k +/- 1 candidates)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order, starting from 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'''{solution() = }''')
652
1
# Notebook-conversion configuration: the install cell injected at the top of
# generated notebooks, and placeholder patterns the formatter must not touch.
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First cells prepended to every generated notebook.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Doc placeholders mapped to fake classes so code blocks stay parseable.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
652
"""Tests for the seq2seq example datasets (truncation, packing, samplers)."""
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset


BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'


def _dump_articles(path, articles):
    """Write `articles` to `path`, one per line."""
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


def make_test_data_dir(tmp_dir):
    """Populate `tmp_dir` with tiny train/val/test source+target files."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'''{split}.source'''), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'''{split}.target'''), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq')
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'''too many tokens in {len(failures)} batches''')

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        # Use a real corpus only when explicitly requested via the environment.
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path='train',
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
                src_lang='EN',
                tgt_lang='FR',
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
652
1
"""Lazy-import ``__init__`` for the Pegasus-X model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
from __future__ import annotations


def A__(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily by value/weight ratio.

    Args:
        value: item values.
        weight: item weights (same length as `value`).
        capacity: knapsack capacity.

    Returns:
        (max_value, fractions) where fractions[i] is the fraction of item i taken.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Visit items in order of decreasing value density.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fraction that fills the remaining capacity, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : Optional[Any] = logging.get_logger(__name__) lowercase_ : Optional[int] = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowercase ( a_ ): """simple docstring""" _UpperCamelCase : Tuple = "decision_transformer" _UpperCamelCase : List[Any] = ["past_key_values"] _UpperCamelCase : Union[str, Any] = { "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Union[str, Any] , lowerCamelCase_ : Any=17 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : List[str]=1_28 , lowerCamelCase_ : Dict=40_96 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Optional[int]=1 , lowerCamelCase_ : Tuple=10_24 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Dict="relu" , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=1e-5 , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : int=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=5_02_56 , lowerCamelCase_ : Any=5_02_56 , lowerCamelCase_ : str=False , lowerCamelCase_ : List[str]=False , **lowerCamelCase_ : Any , ): '''simple docstring''' _snake_case : Dict = state_dim _snake_case : int = act_dim _snake_case : List[Any] = hidden_size _snake_case : str = max_ep_len _snake_case : Union[str, Any] = action_tanh _snake_case : Any = vocab_size _snake_case : int = n_positions _snake_case : Any = n_layer _snake_case : List[Any] = n_head _snake_case : List[Any] = n_inner _snake_case : Dict = activation_function _snake_case : List[Any] = resid_pdrop _snake_case : str = embd_pdrop 
_snake_case : Any = attn_pdrop _snake_case : List[str] = layer_norm_epsilon _snake_case : Any = initializer_range _snake_case : Optional[Any] = scale_attn_weights _snake_case : List[Any] = use_cache _snake_case : int = scale_attn_by_inverse_layer_idx _snake_case : int = reorder_and_upcast_attn _snake_case : List[str] = bos_token_id _snake_case : str = eos_token_id super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
652
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase_ : Any = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[Any] = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
import collections import importlib.util import os import re from pathlib import Path lowercase_ : str = '''src/transformers''' # Matches is_xxx_available() lowercase_ : List[Any] = re.compile(r'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowercase_ : Any = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase_ : List[Any] = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowercase_ : List[Any] = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowercase_ : List[str] = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase_ : List[str] = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowercase_ : List[str] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowercase_ : List[str] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowercase_ : Optional[Any] = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowercase_ : List[Any] = re.compile(r'''^\s*try:''') # Catches a line with else: lowercase_ : List[str] = re.compile(r'''^\s*else:''') def A__( __lowerCAmelCase ): if _re_test_backend.search(__lowerCAmelCase ) is None: return None _snake_case : Tuple = [b[0] for b in _re_backend.findall(__lowerCAmelCase )] backends.sort() return "_and_".join(__lowerCAmelCase ) def A__( __lowerCAmelCase ): with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: _snake_case : Dict = f.readlines() _snake_case : Optional[Any] = 0 while line_index < len(__lowerCAmelCase ) and not 
lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure _snake_case : List[str] = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: _snake_case : List[Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowerCAmelCase ): _snake_case : str = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0] _snake_case : Tuple = re.findall('\[([^\]]+)\]' , __lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue _snake_case : Optional[int] = _re_import_struct_key_value.search(__lowerCAmelCase ) if single_line_import_search is not None: _snake_case : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 _snake_case : Union[str, Any] = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_snake_case : Any = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _snake_case : Any = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _snake_case : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): _snake_case : List[str] = lines[line_index] if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None: _snake_case : List[Any] = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(', ' ) _snake_case : str = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_between_brackets.search(__lowerCAmelCase ) is not None: _snake_case : Tuple = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(', ' ) _snake_case : int = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_quote_object.search(__lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 _snake_case : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _snake_case : List[Any] = [] while ( line_index < len(__lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): _snake_case : Optional[Any] = lines[line_index] _snake_case : int = _re_import.search(__lowerCAmelCase ) if 
single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 _snake_case : Tuple = {'none': objects} # Let's continue with backend-specific objects while line_index < len(__lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. _snake_case : int = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _snake_case : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _snake_case : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): _snake_case : List[Any] = lines[line_index] _snake_case : List[Any] = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 _snake_case : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def A__( __lowerCAmelCase , __lowerCAmelCase ): def find_duplicates(__lowerCAmelCase ): return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _snake_case : Union[str, Any] = [] for key in import_dict_objects.keys(): _snake_case : Any = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _snake_case : List[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING 
objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _snake_case : List[Any] = 'base imports' if key == 'none' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def A__( ): _snake_case : List[str] = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: _snake_case : Optional[int] = os.path.join(__lowerCAmelCase , '__init__.py' ) _snake_case : Tuple = parse_init(__lowerCAmelCase ) if objects is not None: _snake_case : List[Any] = analyze_results(*__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _snake_case : Union[str, Any] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('\n'.join(__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 0: raise ValueError('\n\n'.join(__lowerCAmelCase ) ) def A__( ): _snake_case : Tuple = [] for path, directories, files in os.walk(__lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(__lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowerCAmelCase ) / folder).glob('*.py' ) ) ) == 0: continue _snake_case : str = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) ) _snake_case : List[Any] = short_path.replace(os.path.sep , '.' ) submodules.append(__lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue _snake_case : List[str] = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) ) _snake_case : str = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' 
) ) == 1: submodules.append(__lowerCAmelCase ) return submodules lowercase_ : Dict = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def A__( ): # This is to make sure the transformers module imported is the one in the repo. _snake_case : Dict = importlib.util.spec_from_file_location( 'transformers' , os.path.join(__lowerCAmelCase , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) _snake_case : Any = spec.loader.load_module() _snake_case : Tuple = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__lowerCAmelCase ) > 0: _snake_case : str = '\n'.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F'''{list_of_modules}\n''' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
652
import math def A__( __lowerCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__( __lowerCAmelCase = 1_00_01 ): try: _snake_case : int = int(__lowerCAmelCase ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) _snake_case : list[int] = [] _snake_case : List[Any] = 2 while len(__lowerCAmelCase ) < nth: if is_prime(__lowerCAmelCase ): primes.append(__lowerCAmelCase ) num += 1 else: num += 1 return primes[len(__lowerCAmelCase ) - 1] if __name__ == "__main__": print(F'''{solution() = }''')
652
1
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ : Tuple = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Dict = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowercase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
import torch from transformers import AutoModel class lowercase ( torch.nn.Module ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(lowerCamelCase_ , self ).__init__() _snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ ) _snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 ) _snake_case : str = torch.nn.Softmax(dim=1 ) def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ): '''simple docstring''' return self.bert(**lowerCamelCase_ ).last_hidden_state def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=lowerCamelCase_ ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ): '''simple docstring''' return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) ) def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' _snake_case : int = W_supports['sizes'].tolist() _snake_case : int = W_supports['start_token_id'].item() _snake_case : List[str] = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _snake_case : Optional[int] = self.BERT(**lowerCamelCase_ ) _snake_case : Optional[Any] = self.BERT(**lowerCamelCase_ ) _snake_case : Optional[int] = None _snake_case : Optional[int] = None _snake_case : List[str] = W_supports['input_ids'] == start_token_id _snake_case : Union[str, Any] = W_supports['input_ids'] == end_token_id for i, size in enumerate(lowerCamelCase_ ): if i == 0: _snake_case : str = 0 else: _snake_case : Union[str, Any] = support_sizes[i - 1] _snake_case : Tuple = S[s : s + size][start_token_masks[s : s + size]] _snake_case : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] 
_snake_case : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _snake_case : Union[str, Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _snake_case : Optional[Any] = torch.vstack((p_starts, p_start) ) _snake_case : List[str] = torch.vstack((p_ends, p_end) ) else: _snake_case : Union[str, Any] = p_start _snake_case : Any = p_end return p_starts, p_ends
652
1
from __future__ import annotations import numpy as np def A__( __lowerCAmelCase ): _snake_case , _snake_case : int = np.shape(__lowerCAmelCase ) if rows != columns: _snake_case : Tuple = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(__lowerCAmelCase ) _snake_case : Union[str, Any] = np.zeros((rows, columns) ) _snake_case : List[Any] = np.zeros((rows, columns) ) for i in range(__lowerCAmelCase ): for j in range(__lowerCAmelCase ): _snake_case : Optional[int] = sum(lower[i][k] * upper[k][j] for k in range(__lowerCAmelCase ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) _snake_case : Union[str, Any] = (table[i][j] - total) / upper[j][j] _snake_case : int = 1 for j in range(__lowerCAmelCase , __lowerCAmelCase ): _snake_case : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(__lowerCAmelCase ) ) _snake_case : List[str] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
652
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase ( a_ , a_ , a_ ): """simple docstring""" @register_to_config def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ): '''simple docstring''' super().__init__() _snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : Union[str, Any] = False _snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ ) _snake_case : Union[str, Any] = TaConfig( vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , ) _snake_case : Union[str, Any] = nn.ModuleList() for lyr_num in range(lowerCamelCase_ ): _snake_case : Any = TaBlock(lowerCamelCase_ ) self.encoders.append(lowerCamelCase_ ) _snake_case : Tuple = TaLayerNorm(lowerCamelCase_ ) _snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ ) def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ): '''simple docstring''' _snake_case : Any = self.token_embedder(lowerCamelCase_ ) _snake_case : List[Any] = encoder_input_tokens.shape[1] _snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device ) x += self.position_encoding(lowerCamelCase_ ) _snake_case : Tuple = self.dropout_pre(lowerCamelCase_ ) # inverted the attention mask _snake_case : Dict = 
encoder_input_tokens.size() _snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ ) for lyr in self.encoders: _snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0] _snake_case : Any = self.layer_norm(lowerCamelCase_ ) return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
652
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase_ : Union[str, Any] = logging.get_logger(__name__) lowercase_ : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): for attribute in key.split('.' ): _snake_case : Any = getattr(__lowerCAmelCase , __lowerCAmelCase ) if weight_type is not None: _snake_case : List[str] = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape else: _snake_case : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case : Tuple = value elif weight_type == "weight_g": _snake_case : List[str] = value elif weight_type == "weight_v": _snake_case : List[str] = value elif weight_type == "bias": _snake_case : int = value else: _snake_case : Dict = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _snake_case : List[str] = [] _snake_case : Tuple = fairseq_model.state_dict() _snake_case : Any = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _snake_case : str = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) _snake_case : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): _snake_case : str = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _snake_case : Union[str, Any] = True if "*" in mapped_key: _snake_case : List[str] = name.split(__lowerCAmelCase )[0].split('.' 
)[-2] _snake_case : Tuple = mapped_key.replace('*' , __lowerCAmelCase ) if "weight_g" in name: _snake_case : List[str] = 'weight_g' elif "weight_v" in name: _snake_case : Optional[int] = 'weight_v' elif "weight" in name: _snake_case : Dict = 'weight' elif "bias" in name: _snake_case : List[Any] = 'bias' else: _snake_case : Any = None set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Dict = full_name.split('conv_layers.' )[-1] _snake_case : str = name.split('.' ) _snake_case : Union[str, Any] = int(items[0] ) _snake_case : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case : Optional[int] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case : Dict = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _snake_case : Any = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True ): if config_path is not None: _snake_case : Optional[int] = HubertConfig.from_pretrained(__lowerCAmelCase ) else: _snake_case : Dict = HubertConfig() if is_finetuned: if dict_path: _snake_case : Dict = Dictionary.load(__lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case : int = target_dict.pad_index _snake_case : str = target_dict.bos_index _snake_case : List[Any] = target_dict.eos_index _snake_case : Tuple = len(target_dict.symbols ) _snake_case : List[Any] = os.path.join(__lowerCAmelCase , 'vocab.json' ) if not os.path.isdir(__lowerCAmelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowerCAmelCase ) ) return os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , __lowerCAmelCase ) _snake_case : Dict = WavaVecaCTCTokenizer( __lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__lowerCAmelCase , ) _snake_case : Tuple = True if config.feat_extract_norm == 'layer' else False _snake_case : str = WavaVecaFeatureExtractor( feature_size=1 
, sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ) _snake_case : Any = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) _snake_case : List[str] = HubertForCTC(__lowerCAmelCase ) else: _snake_case : Union[str, Any] = HubertModel(__lowerCAmelCase ) if is_finetuned: _snake_case , _snake_case , _snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _snake_case , _snake_case , _snake_case : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _snake_case : List[Any] = model[0].eval() recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) hf_wavavec.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": lowercase_ : Dict = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowercase_ : List[str] = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
652
def A__( __lowerCAmelCase ): assert column_title.isupper() _snake_case : List[Any] = 0 _snake_case : List[str] = len(__lowerCAmelCase ) - 1 _snake_case : Dict = 0 while index >= 0: _snake_case : List[str] = (ord(column_title[index] ) - 64) * pow(26 , __lowerCAmelCase ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
652
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ : List[Any] = logging.get_logger(__name__)

lowercase_ : Any = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class lowercase ( a_ ):
    """Configuration class for a Swin2SR image super-resolution model.

    Defaults reproduce the `caidas/swin2sr-classicalsr-x2-64` architecture.
    The original obfuscated `__init__` declared every parameter with the same
    name (a SyntaxError) while the body read the real names; the parameter
    names below are restored from the body's attribute assignments.
    """

    # `model_type` / `attribute_map` are the names PretrainedConfig machinery expects;
    # the original assigned both to the same clobbered identifier.
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=1_80,
        depths=[6, 6, 6, 6, 6, 6],  # mutable default kept for signature compatibility
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
652
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


# The decorator and MetricInfo below reference these names; the original
# obfuscation renamed all three constants to the same `lowercase_`, leaving
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` undefined at class-creation time.
_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                 "what about this sentence?",
        ...                 "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                 "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                 "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                 "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                 "what about this sentence?",
        ...                 "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase ( datasets.Metric ):
    """TER metric backed by sacrebleu's TER implementation.

    `datasets.Metric` dispatches to `_info` and `_compute`; the original
    obfuscation gave both methods the same meaningless name, so `compute()`
    could never find them — restored below.
    """

    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference *stream*, not one list per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
652
1
from __future__ import annotations

import math


def A__( __lowerCAmelCase ):
    """Sieve of Eratosthenes: return all primes <= the given positive integer.

    Args:
        __lowerCAmelCase: upper bound (inclusive); must be a positive integer.

    Returns:
        List of primes in ascending order (empty list for input 1).

    Raises:
        ValueError: if the input is not positive.
    """
    num = __lowerCAmelCase
    if num <= 0:
        # Original raised ValueError(<the bad input>) and dropped its own message;
        # restore raising with the composed message.
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False; step is `start`, not the input value.
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Everything above sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
652
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels: bodies below reference `_unmatched`; the original renamed both
# sentinels to the same module name, shadowing one another.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in `qs` match a contiguous window of keys in `ks`."""
    qts = tuple(re.compile(x + "$") for x in qs)
    # Slide the pattern window over ks; the original computed
    # `len(x) - len(x) + 1` (always 1) because both lengths used the same name.
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a (key, value) -> replacement function from (pattern, replacement) rules."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules for a GPT-style transformer under 'mp' model parallelism.

    NOTE(review): the `None` axis entries below were clobbered to an undefined
    name by the obfuscation; restored as unpartitioned axes.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Map every leaf of a parameter pytree to its PartitionSpec.

    Raises AssertionError if any parameter matched no rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
1
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds tiny ViT-MSN configs and inputs so the unit tests run fast."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # Restored f-prefix: the originals printed the literal braces instead of the values.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard modeling tests for ViT-MSN."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)

        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
652
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` into `dirpath`, removing any stale checkpoint files first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Entropy of a categorical distribution `p` along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) -> 0 by convention
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor as a tab-separated layer x head table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model on the eval data and compute per-head attention entropy and
    head importance scores (gradient of the loss w.r.t. a head mask)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank heads from most important (0) to least; the original lost this indexed assignment.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the score (1/loss) drops
    below `masking_threshold` times the original score; returns the final mask."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 1_00,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads selected by `head_mask` and compare score/speed
    between masking and true pruning."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 1_00,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 1_00)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=1_28,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
652
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=a_)
class lowercase(a_):
    """Task template for image-classification datasets.

    Maps a dataset's image column and a ``ClassLabel`` column onto the
    canonical ``{"image", "labels"}`` schema.
    """

    # NOTE(review): in the obfuscated original all five class attributes shared
    # the single name `_UpperCamelCase`, so only the last survived in the class
    # namespace while the methods below read `self.label_column`,
    # `self.label_schema` and `self.image_column`.  The attribute names are
    # restored from those reads (they match the upstream `datasets` template).
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, lowerCamelCase_):
        """Return a copy of this template whose label schema uses the dataset's
        actual ``ClassLabel`` feature for the label column.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in lowerCamelCase_:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        # Bug fix: the obfuscated source tested `isinstance(..., lowerCamelCase_)`
        # (the features dict itself, not a type), which raises TypeError at runtime.
        if not isinstance(lowerCamelCase_[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = lowerCamelCase_[self.label_column]
        # The dataclass is frozen, so write through __dict__ to bypass __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the template's source columns to the canonical names."""
        # NOTE(review): in the obfuscated original this property shared the name
        # `__UpperCAmelCase` with the alignment method above, shadowing it and
        # making it unreachable; distinct (upstream) names restore both.
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
652
def A__( __lowerCAmelCase ):
    """Return the largest number obtainable by deleting exactly one digit.

    The sign of the input is discarded (``abs``) before the digits are
    considered.  For every position, one candidate is built with that digit
    removed, and the maximum candidate is returned.

    >>> A__(152)
    52
    >>> A__(-161)
    61

    Raises:
        TypeError: if the input is not an int.

    NOTE(review): a single-digit input (or 0) leaves an empty digit string and
    ``int('')`` raises ValueError — same as the upstream original; confirm
    callers never pass single-digit values.
    """
    if not isinstance(__lowerCAmelCase, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(__lowerCAmelCase))
    # One working copy of the digit list per removable position.
    # Bug fix: the obfuscated source called list()/len() on the int argument
    # itself and popped with the argument instead of the loop index.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(transposition)) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
652
1
import math
import sys


def A__( __lowerCAmelCase ):
    """Return the minimum number of perfect squares that sum to the input.

    Bottom-up DP over every target value up to the input; by Lagrange's
    four-square theorem the answer is at most 4.  By the original contract,
    an input of 0 returns 1.

    Raises:
        ValueError: if the input is not a whole number or is negative.

    >>> A__(25)
    1
    >>> A__(21)
    3
    """
    if __lowerCAmelCase != int(__lowerCAmelCase):
        raise ValueError('the value of input must be a natural number')
    if __lowerCAmelCase < 0:
        raise ValueError('the value of input must not be a negative number')
    if __lowerCAmelCase == 0:
        return 1
    answers = [-1] * (__lowerCAmelCase + 1)
    answers[0] = 0  # base case: zero needs zero squares
    for i in range(1, __lowerCAmelCase + 1):
        answer = sys.maxsize
        # Bug fix: the obfuscated source took the root of the *whole input*
        # instead of the current target i, so j**2 could exceed i and the
        # negative index read unfilled -1 entries, corrupting the minimum.
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[__lowerCAmelCase]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask


# Module-level logger (obfuscated name).
lowercase_ : Tuple = logging.getLogger(__name__)

# NOTE(review): local variable names in this file were mangled to a handful of
# shared identifiers (`_snake_case`, `lowerCamelCase_`), so many reads below
# reference names that are never assigned (e.g. `mode`, `examples`, `words`,
# `labels`, `splits`, `logger`, `test_input_reader`, `preds_list`, `s_p`,
# `path`).  Verify against the upstream transformers token-classification
# example (`tasks.py`) before relying on runtime behavior.


class lowercase ( a_ ):
    """CoNLL-2003-style NER task: reads whitespace-separated token/label files
    where sentences are separated by blank lines or '-DOCSTART-' markers."""

    def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
        '''Store which whitespace-split column holds the label (default: last).'''
        # NOTE(review): reads `label_idx`, which is undefined here; upstream
        # assigns the constructor argument to self.label_idx — confirm.
        _snake_case : str = label_idx

    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` from the data directory into InputExample objects,
        one per sentence, splitting each line on a single space.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            # Accept either a Split enum or a plain string for the mode.
            _snake_case : Union[str, Any] = mode.value
        _snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Dict = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            _snake_case : List[Any] = []
            _snake_case : int = []
            for line in f:
                # Blank line / document marker ends the current sentence.
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                        guid_index += 1
                        _snake_case : Optional[Any] = []
                        _snake_case : Union[str, Any] = []
                else:
                    _snake_case : Tuple = line.split(' ' )
                    words.append(splits[0] )
                    if len(lowerCamelCase_ ) > 1:
                        # Strip the trailing newline from the label column.
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            # Flush the last sentence if the file has no trailing blank line.
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
        return examples

    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Re-emit the test input file with each token followed by its
        predicted label, consuming one prediction list per sentence.'''
        _snake_case : str = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                # Copy separators through; advance once a sentence is consumed.
                writer.write(lowerCamelCase_ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                _snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(lowerCamelCase_ )
            else:
                # Prediction list exhausted: the sentence was truncated upstream.
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
        '''Load labels from a file (one per line, 'O' prepended if absent),
        or fall back to the standard CoNLL-2003 NER tag set.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : Optional[int] = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Optional[int] = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class lowercase ( a_ ):
    """Chunking task: same file format as NER but the label lives in the
    second-to-last whitespace column."""

    def __init__( self : Optional[int] ):
        '''Select the second-to-last column as the label column.'''
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        '''Load labels from a file or fall back to the CoNLL-2000 chunk tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : str = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Union[str, Any] = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class lowercase ( a_ ):
    """POS-tagging task backed by CoNLL-U files parsed with `conllu`."""

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` as CoNLL-U; one InputExample per sentence, with
        the surface form as the word and the UPOS tag as the label.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : str = mode.value
        _snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Tuple = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            for sentence in parse_incr(lowerCamelCase_ ):
                _snake_case : List[str] = []
                _snake_case : str = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                # Every token must carry a UPOS tag.
                assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write one line per sentence as "form (upos|prediction)" triples,
        popping predictions in token order.'''
        _snake_case : Dict = 0
        for sentence in parse_incr(lowerCamelCase_ ):
            _snake_case : Optional[int] = preds_list[example_id]
            _snake_case : List[Any] = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(lowerCamelCase_ )
            example_id += 1

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
        '''Load labels from a file, or fall back to the 17 Universal
        Dependencies UPOS tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
652
1
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)

# NOTE(review): variable names in this script were mangled to shared
# identifiers (`_snake_case`, `__lowerCAmelCase`), so several reads below
# reference names never assigned (e.g. `image_size`, `transform`, `image`,
# `key`, `value`, `config_path`, `pt_model`, `modified_state_dict`,
# `hf_model`, `tokenizer`, `out`, `vqa_model`, `hf_vqa_model`, `answer`,
# `itm_model`, `hf_itm_model`, `out_itm`, `pytorch_dump_folder_path`,
# `parser`, `args`, `convert_blip_checkpoint`).  Verify against the upstream
# transformers BLIP conversion script before executing.


def A__( __lowerCAmelCase , __lowerCAmelCase ):
    '''Download the BLIP demo image, resize/normalize it with the CLIP
    statistics, and return it as a (1, 3, H, W) tensor on the given device.'''
    _snake_case : Union[str, Any] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    _snake_case : Union[str, Any] = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('RGB' )
    _snake_case : Optional[Any] = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            # CLIP mean/std normalization constants.
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
        ]
    )
    _snake_case : Union[str, Any] = transform(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
    return image


def A__( __lowerCAmelCase ):
    '''Translate an original BLIP state-dict key into the Hugging Face
    naming scheme via a cascade of regex rewrites.'''
    if "visual_encoder" in key:
        _snake_case : Union[str, Any] = re.sub('visual_encoder*' , 'vision_model.encoder' , __lowerCAmelCase )
    if "blocks" in key:
        _snake_case : Optional[int] = re.sub(R'blocks' , 'layers' , __lowerCAmelCase )
    if "attn" in key:
        _snake_case : Dict = re.sub(R'attn' , 'self_attn' , __lowerCAmelCase )
    if "norm1" in key:
        _snake_case : str = re.sub(R'norm1' , 'layer_norm1' , __lowerCAmelCase )
    if "norm2" in key:
        _snake_case : List[str] = re.sub(R'norm2' , 'layer_norm2' , __lowerCAmelCase )
    if "encoder.norm" in key:
        _snake_case : Union[str, Any] = re.sub(R'encoder.norm' , 'post_layernorm' , __lowerCAmelCase )
    if "encoder.patch_embed.proj" in key:
        _snake_case : List[Any] = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , __lowerCAmelCase )
    if "encoder.pos_embed" in key:
        _snake_case : List[Any] = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , __lowerCAmelCase )
    if "encoder.cls_token" in key:
        _snake_case : List[Any] = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , __lowerCAmelCase )
    if "self_attn" in key:
        _snake_case : Union[str, Any] = re.sub(R'self_attn.proj' , 'self_attn.projection' , __lowerCAmelCase )
    return key


@torch.no_grad()
def A__( __lowerCAmelCase , __lowerCAmelCase=None ):
    '''Convert the three original BLIP checkpoints (captioning, VQA, ITM) to
    HF models, sanity-check each against hard-coded expected outputs, and
    optionally save them under the dump folder (with _vqa/_itm suffixes).'''
    if config_path is not None:
        _snake_case : Dict = BlipConfig.from_pretrained(__lowerCAmelCase )
    else:
        _snake_case : List[Any] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
    _snake_case : str = BlipForConditionalGeneration(__lowerCAmelCase ).eval()

    # --- captioning checkpoint ---
    _snake_case : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    _snake_case : Any = blip_decoder(pretrained=__lowerCAmelCase , image_size=3_84 , vit='base' )
    _snake_case : Union[str, Any] = pt_model.eval()
    _snake_case : Union[str, Any] = pt_model.state_dict()
    # Rename every key in place; iterate over a copy since we pop while looping.
    for key in modified_state_dict.copy():
        _snake_case : int = modified_state_dict.pop(__lowerCAmelCase )
        _snake_case : int = rename_key(__lowerCAmelCase )
        _snake_case : Optional[int] = value
    hf_model.load_state_dict(__lowerCAmelCase )

    _snake_case : Any = 3_84
    _snake_case : Dict = load_demo_image(image_size=__lowerCAmelCase , device='cpu' )
    _snake_case : Optional[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
    _snake_case : List[Any] = tokenizer(['a picture of'] ).input_ids
    # Expected token ids pin the converted model's caption for the demo image.
    _snake_case : Any = hf_model.generate(__lowerCAmelCase , __lowerCAmelCase )
    assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
    _snake_case : Optional[Any] = hf_model.generate(__lowerCAmelCase )
    assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(__lowerCAmelCase )

    # --- VQA checkpoint ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    _snake_case : int = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    _snake_case : Optional[Any] = blip_vqa(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit='base' )
    vqa_model.eval()
    _snake_case : str = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        _snake_case : List[str] = modified_state_dict.pop(__lowerCAmelCase )
        _snake_case : Optional[Any] = rename_key(__lowerCAmelCase )
        _snake_case : Optional[Any] = value
    _snake_case : Optional[int] = BlipForQuestionAnswering(__lowerCAmelCase )
    hf_vqa_model.load_state_dict(__lowerCAmelCase )

    _snake_case : Optional[Any] = ['How many dogs are in this image?']
    _snake_case : Tuple = tokenizer(__lowerCAmelCase , return_tensors='pt' ).input_ids
    _snake_case : int = hf_vqa_model.generate(__lowerCAmelCase , __lowerCAmelCase )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )

    # --- image-text retrieval (ITM) checkpoint ---
    _snake_case : Dict = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    _snake_case : List[Any] = blip_itm(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit='base' )
    itm_model.eval()
    _snake_case : int = itm_model.state_dict()
    for key in modified_state_dict.copy():
        _snake_case : Tuple = modified_state_dict.pop(__lowerCAmelCase )
        _snake_case : Optional[Any] = rename_key(__lowerCAmelCase )
        _snake_case : Union[str, Any] = value
    _snake_case : Optional[Any] = BlipForImageTextRetrieval(__lowerCAmelCase )

    _snake_case : int = ['A picture of a woman with a dog sitting in a beach']
    _snake_case : Optional[int] = tokenizer(
        __lowerCAmelCase , return_tensors='pt' , padding='max_length' , truncation=__lowerCAmelCase , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(__lowerCAmelCase )
    hf_itm_model.eval()

    # Exact scalar matches pin the converted ITM scores for the demo pair.
    _snake_case : List[Any] = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
    _snake_case : Optional[int] = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )


if __name__ == "__main__":
    lowercase_ : Tuple = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    lowercase_ : Optional[Any] = parser.parse_args()
    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
652
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# NOTE(review): local names here were mangled to shared identifiers
# (`_snake_case`, `lowerCamelCase_`), so many reads reference names never
# assigned in this text (e.g. `batch_size`, `num_channels`, `sizes`,
# `sd_pipe`, `prompt`, `output`, `image`, `image_slice`, `expected_slice`,
# `pipe`, `unet`, `vae`, `bert`, `tmpdirname`).  Verify against the upstream
# diffusers safe-stable-diffusion test module before executing.


class lowercase ( unittest.TestCase ):
    """Fast unit tests for StableDiffusionPipelineSafe using tiny dummy
    sub-models (UNet / VAE / CLIP text encoder)."""

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Free CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Deterministic dummy 32x32 image batch (seeded RNG).'''
        _snake_case : Tuple = 1
        _snake_case : str = 3
        _snake_case : List[str] = (32, 32)
        _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
        return image

    @property
    def __UpperCAmelCase ( self : str ):
        '''Tiny seeded conditional UNet for fast tests.'''
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model

    @property
    def __UpperCAmelCase ( self : Dict ):
        '''Tiny seeded VAE for fast tests.'''
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    @property
    def __UpperCAmelCase ( self : Tuple ):
        '''Tiny seeded CLIP text encoder for fast tests.'''
        torch.manual_seed(0 )
        _snake_case : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(lowerCamelCase_ )

    @property
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Stub feature extractor: returns an object with empty pixel_values
        so no real safety checker preprocessing happens.'''
        def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
            class lowercase :
                """Minimal stand-in exposing .pixel_values and .to()."""

                def __init__( self : Tuple ):
                    _snake_case : List[str] = torch.ones([0] )

                def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
                    self.pixel_values.to(lowerCamelCase_ )
                    return self

            return Out()

        return extract

    def __UpperCAmelCase ( self : int ):
        '''DDIM scheduler: pipeline and return_dict=False paths must produce
        the pinned image slice on CPU.'''
        _snake_case : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : int = self.dummy_cond_unet
        _snake_case : str = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Union[str, Any] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : str = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[str] = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Union[str, Any] = output.images
        _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Tuple = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : List[str] ):
        '''Same as above but with the PNDM scheduler and its pinned slice.'''
        _snake_case : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : List[str] = self.dummy_cond_unet
        _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : int = self.dummy_vae
        _snake_case : List[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        _snake_case : Any = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : str = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Optional[Any] = output.images
        _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        '''Pipelines without a safety checker must load, run, round-trip
        through save_pretrained/from_pretrained, and still run.'''
        _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(pipe.scheduler , lowerCamelCase_ )
        assert pipe.safety_checker is None
        _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''fp16 smoke test: half-precision sub-models must produce an image
        of the expected shape on GPU.'''
        _snake_case : Union[str, Any] = self.dummy_cond_unet
        _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : Any = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # put models in fp16
        _snake_case : str = unet.half()
        _snake_case : Union[str, Any] = vae.half()
        _snake_case : Dict = bert.half()
        # make sure here that pndm scheduler skips prk
        _snake_case : List[str] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Tuple = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly end-to-end tests on the full SD v1.5 checkpoint comparing
    generations with safe-latent-diffusion guidance off vs. on."""

    def __UpperCAmelCase ( self : Tuple ):
        '''Free CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : List[str] ):
        '''Harmful-style prompt: pinned slices with SLD disabled and with the
        strong SLD configuration.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Optional[int] = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        _snake_case : List[str] = 40_03_66_03_46
        _snake_case : int = 7
        # without safety guidance (sld_guidance_scale = 0)
        _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Union[str, Any] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : str = output.images
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # without safety guidance (strong configuration)
        _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
        _snake_case : int = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : Tuple = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        '''Nudity-adjacent prompt: pinned slices with SLD off and on.'''
        _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
        _snake_case : Optional[Any] = 27_34_97_17_55
        _snake_case : Union[str, Any] = 7
        _snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Any = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        _snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : Tuple ):
        '''Default pipeline (safety checker on): SLD off yields the blacked-out
        (all-zero) slice; strong SLD yields the pinned safe image slice.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _snake_case : List[Any] = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        _snake_case : Union[str, Any] = 10_44_35_52_34
        _snake_case : Dict = 12
        _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Optional[int] = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Optional[int] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
652
1
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    Each test downloads pretrained weights, runs a pmap-jitted 50-step
    generation on every available device, and compares a 3x3 pixel slice of
    the first image against reference values.
    """

    def tearDown(self):
        # Pretrained models are large; force a GC pass between tests.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        """End-to-end generation conditioned on a canny-edge control image."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        # The pipeline params do not include the controlnet weights; attach them.
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        # Replicate params and shard inputs across devices for pmap execution.
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        """End-to-end generation conditioned on an openpose control image."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png'
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
652
import functools


def A__(days, costs):
    """Return the minimum cost to cover every travel day in *days*.

    ``costs`` holds the prices of a 1-day, 7-day, and 30-day pass, in that
    order.  Solved top-down over calendar days 1..365 with memoization
    (LeetCode 983, "Minimum Cost For Tickets").

    :param days: strictly positive, < 366, list of travel days
    :param costs: exactly three integer pass prices ``[day, week, month]``
    :raises ValueError: on malformed ``days`` or ``costs``
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    # O(1) membership checks inside the recursion.
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Minimum cost to cover all travel days >= index.
        if index > 365:
            return 0

        if index not in days_set:
            # Not a travel day: nothing to buy today.
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
from __future__ import annotations


def A__(value, weight, capacity):
    """Greedy solution to the fractional knapsack problem.

    Items are taken whole in decreasing value/weight ratio until the next
    item no longer fits, of which only the fitting fraction is taken.

    :param value: item values
    :param weight: item weights (parallel to ``value``)
    :param capacity: knapsack capacity
    :return: ``(max_value, fractions)`` where ``fractions[i]`` is the taken
        fraction of item ``i`` (0, 1, or a proper fraction for the last item)
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Visit items in decreasing value-density order.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that fills the remaining capacity.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias of :class:`SegformerImageProcessor`.

    Kept for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
652
1
class TrieNode:
    """A node of a prefix tree (trie) over lowercase words.

    ``nodes`` maps each child character to its subtree; ``is_leaf`` marks
    that a complete word ends at this node.
    """

    def __init__(self) -> None:
        self.nodes: dict = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating nodes along the path as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was inserted (prefixes alone don't match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning nodes that become empty."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when the caller may delete its edge to `curr`.
            if index == len(word):
                # Word not actually stored here.
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # Path does not exist: nothing to delete.
            if not char_node:
                return False
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Print every word stored under *node*, prefixed by *word*."""
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """Exercise insert/find/delete; returns True when all checks pass."""
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), 'works!' if passes else 'doesn\'t work :(')


def main() -> None:
    """Entry point: run the self-test and report the outcome."""
    print_results('Testing trie functionality', test_trie())


if __name__ == "__main__":
    main()
652
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Computed as C(trials, successes) * prob**successes * (1-prob)**(trials-successes).

    :raises ValueError: if successes > trials, either count is negative or
        non-integer, or prob is not strictly between 0 and 1.
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
652
1
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    """BERT encoder that can run a single transformer layer at a time."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        # Run only layer `current_layer` — PABEE advances one layer per call.
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BertModel with Patience-based Early Exit (PABEE) inference.

    At inference time with ``patience > 0``, layers are evaluated one by one
    and the forward pass stops once ``patience`` consecutive internal
    classifiers agree on the prediction.
    """

    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        # Early-exit configuration and inference statistics.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        # Agreement tolerance used instead of argmax equality for regression.
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_layers_num = 0
        self.inference_instances_num = 0

    def log_stats(self):
        """Print the average number of layers used and the implied speed-up."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Run BERT with PABEE; returns a list of per-layer classifier logits.

        In training mode every layer's classifier output is returned; in
        inference mode either the last layer's output (patience == 0) or the
        early-exited prediction is returned as a single-element list.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect logits from every internal classifier.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop when `patience` consecutive
            # layer classifiers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """Sequence classification head over BertModelWithPabee.

    One linear classifier per transformer layer so PABEE can compare
    predictions between consecutive layers.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return ``(loss, last_logits)`` when labels are given, else ``(last_logits,)``."""
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            # Later layers get a larger weight (ix + 1) in the combined loss.
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
652
# Notebook-generation configuration for the Italian documentation.
# Code cell injected at the top of every auto-generated notebook.
INSTALL_CONTENT = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First cells prepended to every converted notebook.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Placeholders the doc-formatter (black) must leave untouched.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
652
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: module name -> public symbols it provides.
_import_structure = {
    'configuration_table_transformer': [
        'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TableTransformerConfig',
        'TableTransformerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: modeling symbols are simply not exposed.
    pass
else:
    _import_structure['modeling_table_transformer'] = [
        'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TableTransformerForObjectDetection',
        'TableTransformerModel',
        'TableTransformerPreTrainedModel',
    ]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: module name -> public symbols it provides.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # RoCBert has no fast tokenizer; the guard is kept for structural parity.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]


if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # No fast tokenizer to import.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: module name -> public symbols it provides.
_import_structure = {
    'configuration_nllb_moe': [
        'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'NllbMoeConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_nllb_moe'] = [
        'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'NllbMoeForConditionalGeneration',
        'NllbMoeModel',
        'NllbMoePreTrainedModel',
        'NllbMoeTop2Router',
        'NllbMoeSparseMLP',
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Names must match _import_structure above exactly.
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy-import structure: module name -> public symbols it provides.
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
652
1
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    """CoNLL-style NER task: one token per line, label in the last column."""

    def __init__(self, label_idx=-1):
        # Column of the label in each whitespace-separated line.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Read ``{mode}.txt`` and split it into sentence-level examples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                    # Blank line / doc marker terminates the current sentence.
                    if words:
                        examples.append(
                            InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels)
                        )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('\n', ''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O')
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Echo the input file, appending the predicted label to each token line."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
                writer.write(output_line)
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.', line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path*, or fall back to the CoNLL-2003 label set."""
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    """Chunking task: same file format as NER but label in the second-to-last column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    """Part-of-speech tagging over CoNLL-U files (uses the ``upos`` field)."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'])
                    labels.append(token['upos'])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write each sentence as ``form (upos|prediction)`` tuples, one line per sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path*, or fall back to the universal POS tag set."""
        if path:
            with open(path, 'r') as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
652
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Must be named `pytestmark` for pytest to apply the mark to every test in the module.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """Inspecting a dataset copies its loading script (and no cache) into the target dir."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """Inspecting a metric copies its loading script (and no cache) into the target dir."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """get_dataset_config_info returns the requested config with its split names."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Omitting the config name on a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name is among the dataset's configs."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """get_dataset_infos returns one info per config, each with its splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    """The expected config appears in the infos with the expected splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Omitting the config name on a multi-config dataset raises."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
652
1
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least fill-count n exceeding one million.

    For a row of length ``n`` filled with black blocks of length at least
    ``min_block_length`` separated by at least one grey square, let F(m, n)
    be the number of ways the row can be filled.  Return the least ``n``
    for which F(min_block_length, n) first exceeds one million.

    :param min_block_length: minimum allowed block length m (default 50).
    :return: smallest n with F(m, n) > 1_000_000.
    """
    # fill_count_functions[k] == F(m, k); rows shorter than m admit only
    # the empty filling, hence the count 1.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # A block of this length starting here leaves an independent
                # sub-row of length n - block_start - block_length - 1.
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # The block may also sit flush against the right edge.
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n


# Backward-compatible alias for the previous (obfuscated) public name.
A__ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
652
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param bert_config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: where to save the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the previous (obfuscated) public name.
A__ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
652
1
import operator as op


def solve(post_fix):
    """Evaluate a postfix (RPN) expression of non-negative integer tokens.

    Prints a step-by-step trace table of stack operations, then returns the
    final result as an ``int``.

    :param post_fix: sequence of tokens, e.g. ``["2", "3", "+"]``.
    :return: integer value of the expression.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


# Backward-compatible alias for the previous (obfuscated) public name.
A__ = solve

if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
652
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (deterministic trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 1_00_01) -> int:
    """Project Euler 7: return the ``nth`` prime number (default 10001st)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


# Backward-compatible alias for the previous (obfuscated) public name.
A__ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
652
1
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear model used as an offloading fixture."""

    def __init__(self):
        super().__init__()
        # Submodules must be bound to `self` so they appear in state_dict()
        # under the keys the tests below look for ("linear1.weight", ...).
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        """Offloading a state dict writes an index plus one .dat file per weight."""
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        """A weight round-trips through offload_weight / load_offloaded_weight for each dtype."""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        """OffloadedWeightsLoader merges an in-memory part with an on-disk part."""
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        """Only exact submodule prefixes are extracted ('a.1' must not match 'a.10')."""
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
652
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


# Tokenizer / model identifiers used across the tests below.
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    """Write ``articles`` newline-joined to ``path``."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    """Populate ``tmp_dir`` with train/val/test source and target files."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        """Sources/targets are truncated to the configured lengths; mbart lang codes land correctly."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        """LegacySeq2SeqDataset trims sources and truncates targets."""
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        """pack_data_dir merges short examples into fewer, longer lines without losing text."""
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 1_28, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        """Dynamic sampler produces variable batch sizes without dropping examples or overflowing."""
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        """Sortish sampling yields strictly fewer pad tokens than naive batching."""
        ds, _, tokenizer = self._get_dataset(max_len=5_12)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=10_00, max_len=1_28):
        """Build a Marian Seq2SeqDataset fixture; returns (dataset, max_tokens, tokenizer)."""
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        """Two distributed sortish samplers over the same data share no indices."""
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 2_56, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 2_56, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        """dataset_kwargs carries lang codes for mbart and add_prefix_space for bart only."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
652
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# SentencePiece uses this marker for word-initial pieces.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer: a SentencePiece model restricted to a reduced
    monolingual vocabulary, with fairseq-compatible special-token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # Drop the unpicklable C++ processor; keep its serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 1/0 mask marking special tokens for sequence-pair inputs."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token types: all zeros, sized to the built sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the reduced fairseq vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model and the monolingual dict into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
652
from __future__ import annotations


def A__(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken whole in decreasing value/weight ratio until one no
    longer fits, at which point a fraction of it fills the remaining
    capacity.

    :param value: item values.
    :param weight: item weights (same length as ``value``).
    :param capacity: knapsack capacity.
    :return: (max_value, fractions) where fractions[i] is the taken share of item i.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Visit items by decreasing value density.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that fills the remaining capacity.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
from itertools import product

from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Return an (un-normalised) ``k_size`` x ``k_size`` Gaussian kernel.

    The kernel is centred: for k_size=3 the grid runs over -1..1 in both
    axes. Note the leading factor uses ``sigma`` (not ``sigma**2``),
    matching the original implementation; the kernel is not normalised.
    """
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Convolve *image* with a Gaussian kernel using an im2col strategy.

    The output is the "valid" region only: each side shrinks by
    ``k_size - 1`` pixels. The result is cast to uint8.

    Args:
        image: 2-D numpy array (grayscale image).
        k_size: side length of the square Gaussian kernel.
        sigma: standard deviation of the Gaussian.
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn every k_size*k_size window into one row so that the
    # whole convolution reduces to a single matrix-vector product.
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # Flatten the kernel to shape (k*k,) and apply it to every window.
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # OpenCV (imported as ``cva`` in this project) is only needed for the
    # demo below; import it lazily so the filter functions above remain
    # importable without OpenCV installed.
    from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey

    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
652
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import table mapping submodule name -> public names it defines.
# Configuration and tokenizer have no heavy dependencies, so they are
# always listed.
# NOTE(review): upstream this dict is named ``_import_structure``; the
# ``_LazyModule(...)`` call at the bottom still references that name, so
# as written this module raises NameError at import time — the rename
# below looks like an artifact of an automated transformation.
lowercase_ : Any = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}

# The modeling submodule requires torch; only register it when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream this assigns
    # ``_import_structure["modeling_canine"] = [...]``; here it rebinds the
    # same module-level name instead, clobbering the dict above — another
    # apparent transformation artifact.
    lowercase_ : Optional[Any] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]

if TYPE_CHECKING:
    # Static type checkers (and IDEs) see the real imports directly.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # At runtime, replace this module object with a lazy proxy that imports
    # each submodule on first attribute access.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


lowercase_ : int = logging.get_logger(__name__)


class lowercase(CLIPImageProcessor):
    """Deprecated alias of :class:`CLIPImageProcessor`.

    Kept for backward compatibility only: instantiation emits a
    ``FutureWarning`` and then behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        """Warn about the deprecation, then forward all arguments."""
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
652
import math


def is_prime(number):
    """Return True if *number* is prime (6k +/- 1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth=10001):
    """Return the *nth* prime number (Project Euler problem 7).

    Args:
        nth: 1-based index of the prime to return; cast to int.

    Raises:
        TypeError: if *nth* cannot be cast to int.
        ValueError: if *nth* is not at least one.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(F'''{solution() = }''')
652
1
from pathlib import Path

import cva
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pt1, pt2, rows, cols):
    """Return *img* warped by the affine transform mapping pt1 -> pt2.

    Args:
        img: source image as a numpy array.
        pt1: three source points, float32 array of shape (3, 2).
        pt2: the three corresponding destination points.
        rows: height of the output image.
        cols: width of the output image.
    """
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different point triplets to rotate the image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image_item in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image_item, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
652
import torch
from transformers import AutoModel


class lowercase(torch.nn.Module):
    """Few-shot NER scorer: matches query spans against support examples
    using a BERT encoder plus cosine-similarity attention.

    NOTE(review): this block appears machine-transformed and is not
    runnable as-is: all methods share the name ``__UpperCAmelCase`` (each
    definition shadows the previous one in the class dict), two signatures
    repeat the ``lowerCamelCase_`` parameter (a SyntaxError), assignments
    that presumably targeted ``self.<attr>`` or named locals now bind
    throwaway ``_snake_case`` variables, and later code reads names
    (``self.bert``, ``self.BERT``, ``support_sizes``, ``q``, ``S`` ...)
    that are never bound. Documented verbatim; needs reconstruction
    against the upstream FSNER implementation before use.
    """

    def __init__(self: Tuple, lowerCamelCase_: Dict = "sayef/fsner-bert-base-uncased"):
        '''Load the pretrained encoder plus similarity/softmax helpers.'''
        # NOTE(review): ``super(lowerCamelCase_, self)`` passes the model-name
        # *string* where a class is expected, and the three results below are
        # bound to ``_snake_case`` instead of ``self.bert`` / ``self.cos`` /
        # ``self.softmax`` that the other methods read — TODO reconstruct.
        super(lowerCamelCase_, self).__init__()
        _snake_case: Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_, return_dict=lowerCamelCase_)
        _snake_case: str = torch.nn.CosineSimilarity(3, 1e-08)
        _snake_case: str = torch.nn.Softmax(dim=1)

    def __UpperCAmelCase(self: int, **lowerCamelCase_: List[str]):
        '''Run the encoder on tokenized inputs; return the last hidden state.'''
        return self.bert(**lowerCamelCase_).last_hidden_state

    def __UpperCAmelCase(self: List[Any], lowerCamelCase_: Union[str, Any]):
        '''Sum token embeddings over dim 2 (the argument is the keepdim flag).'''
        # NOTE(review): ``token_embeddings`` is never bound here — presumably
        # the parameter originally carried that name.
        return token_embeddings.sum(2, keepdim=lowerCamelCase_)

    def __UpperCAmelCase(self: Dict, lowerCamelCase_: Dict, lowerCamelCase_: Tuple, lowerCamelCase_: Optional[Any]=1):
        '''Temperature-scaled softmax over cosine similarities (T unbound here).'''
        return self.softmax(T * self.cos(lowerCamelCase_, lowerCamelCase_))

    def __UpperCAmelCase(self: Optional[Any], lowerCamelCase_: Any, lowerCamelCase_: Union[str, Any]):
        '''Score start/end token probabilities of query spans against supports.'''
        # Per-support bookkeeping read from the supports batch, then removed
        # so the remaining tensors can be fed straight to the encoder.
        _snake_case: int = W_supports['sizes'].tolist()
        _snake_case: int = W_supports['start_token_id'].item()
        _snake_case: List[str] = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # Encode queries and supports (both calls mangled to the same arg).
        _snake_case: Optional[int] = self.BERT(**lowerCamelCase_)
        _snake_case: Optional[Any] = self.BERT(**lowerCamelCase_)
        _snake_case: Optional[int] = None
        _snake_case: Optional[int] = None
        # Boolean masks marking entity start/end marker tokens in supports.
        _snake_case: List[str] = W_supports['input_ids'] == start_token_id
        _snake_case: Union[str, Any] = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(lowerCamelCase_):
            if i == 0:
                _snake_case: str = 0
            else:
                _snake_case: Union[str, Any] = support_sizes[i - 1]
            # Slice this query's support window and keep marker positions only.
            _snake_case: Tuple = S[s : s + size][start_token_masks[s : s + size]]
            _snake_case: Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
            # Similarity of the query tokens to support start/end markers.
            _snake_case: Tuple = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            _snake_case: Union[str, Any] = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                _snake_case: Optional[Any] = torch.vstack((p_starts, p_start))
                _snake_case: List[str] = torch.vstack((p_ends, p_end))
            else:
                _snake_case: Union[str, Any] = p_start
                _snake_case: Any = p_end
        return p_starts, p_ends
652
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPixaPixPipeline,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class lowercase(a_, a_, a_, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for the InstructPix2Pix pipeline.

    NOTE(review): this file appears machine-transformed and is not runnable
    as-is: every method is named ``__UpperCAmelCase`` (later defs shadow
    earlier ones), several signatures repeat the ``lowerCamelCase_``
    parameter (a SyntaxError), locals are bound to throwaway ``_snake_case``
    names while later statements read the original names (``sd_pipe``,
    ``inputs``, ``image_slice`` ...), and the mixin bases were collapsed to
    the undefined name ``a_``. Documented verbatim; needs reconstruction
    against the upstream diffusers test module before use.
    """

    # Mangled class attributes; upstream these are pipeline_class,
    # params, batch_params, image_params and image_latents_params.
    _UpperCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline
    _UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    _UpperCamelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    _UpperCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __UpperCAmelCase(self: Any):
        '''Build tiny model components (unet/vae/text encoder) for fast tests.'''
        torch.manual_seed(0)
        _snake_case: Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        _snake_case: Any = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
        torch.manual_seed(0)
        _snake_case: Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        _snake_case: Any = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=10_00,
        )
        _snake_case: Tuple = CLIPTextModel(lowerCamelCase_)
        _snake_case: List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        _snake_case: Any = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def __UpperCAmelCase(self: Union[str, Any], lowerCamelCase_: Tuple, lowerCamelCase_: List[str] = 0):
        '''Build deterministic dummy call kwargs (device + seed args mangled).'''
        _snake_case: List[Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
        _snake_case: str = image.cpu().permute(0, 2, 3, 1)[0]
        _snake_case: Union[str, Any] = Image.fromarray(np.uinta(lowerCamelCase_)).convert('RGB')
        # mps has no torch.Generator(device=...) support — seed globally there.
        if str(lowerCamelCase_).startswith('mps'):
            _snake_case: Any = torch.manual_seed(lowerCamelCase_)
        else:
            _snake_case: List[Any] = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
        _snake_case: str = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs

    def __UpperCAmelCase(self: List[Any]):
        '''Default-case smoke test: run the pipeline and pin a pixel slice.'''
        _snake_case: List[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case: Optional[Any] = self.get_dummy_components()
        _snake_case: Any = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_)
        _snake_case: Optional[int] = sd_pipe.to(lowerCamelCase_)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        _snake_case: int = self.get_dummy_inputs(lowerCamelCase_)
        _snake_case: Tuple = sd_pipe(**lowerCamelCase_).images
        _snake_case: Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _snake_case: List[str] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def __UpperCAmelCase(self: List[Any]):
        '''Same pipeline with a negative prompt; different expected slice.'''
        _snake_case: Dict = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case: List[Any] = self.get_dummy_components()
        _snake_case: Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_)
        _snake_case: Optional[Any] = sd_pipe.to(lowerCamelCase_)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        _snake_case: Any = self.get_dummy_inputs(lowerCamelCase_)
        _snake_case: Optional[Any] = 'french fries'
        _snake_case: List[str] = sd_pipe(**lowerCamelCase_, negative_prompt=lowerCamelCase_)
        _snake_case: Any = output.images
        _snake_case: Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _snake_case: List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def __UpperCAmelCase(self: Optional[Any]):
        '''Batched-prompt case: duplicate the prompt and the input image.'''
        _snake_case: str = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case: Tuple = self.get_dummy_components()
        _snake_case: Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_)
        _snake_case: Dict = sd_pipe.to(lowerCamelCase_)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        _snake_case: List[str] = self.get_dummy_inputs(lowerCamelCase_)
        _snake_case: int = [inputs['prompt']] * 2
        # Rebuild the image tensor by hand, normalised to [-1, 1], batch of 2.
        _snake_case: Union[str, Any] = np.array(inputs['image']).astype(np.floataa) / 255.0
        _snake_case: Optional[Any] = torch.from_numpy(lowerCamelCase_).unsqueeze(0).to(lowerCamelCase_)
        _snake_case: Dict = image / 2 + 0.5
        _snake_case: List[str] = image.permute(0, 3, 1, 2)
        _snake_case: Any = image.repeat(2, 1, 1, 1)
        _snake_case: Optional[int] = sd_pipe(**lowerCamelCase_).images
        _snake_case: Any = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        _snake_case: Any = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def __UpperCAmelCase(self: List[str]):
        '''Euler-ancestral scheduler variant of the default smoke test.'''
        _snake_case: Union[str, Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case: Dict = self.get_dummy_components()
        _snake_case: List[Any] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear'
        )
        _snake_case: List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_)
        _snake_case: int = sd_pipe.to(lowerCamelCase_)
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
        _snake_case: Any = self.get_dummy_inputs(lowerCamelCase_)
        _snake_case: int = sd_pipe(**lowerCamelCase_).images
        _snake_case: str = image[0, -3:, -3:, -1]
        _snake_case: Tuple = [round(lowerCamelCase_, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(lowerCamelCase_) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        _snake_case: Any = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def __UpperCAmelCase(self: int):
        '''Loosen the batch-vs-single tolerance from the mixin default.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def __UpperCAmelCase(self: Optional[int]):
        '''Check that passing pre-encoded latents matches passing the image.'''
        _snake_case: str = self.get_dummy_components()
        _snake_case: int = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_)
        _snake_case: Optional[int] = VaeImageProcessor(do_resize=lowerCamelCase_, do_normalize=lowerCamelCase_)
        _snake_case: Any = pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        _snake_case: List[Any] = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase_, input_image_type='pt'))[0]
        _snake_case: Optional[Any] = components['vae']
        _snake_case: Dict = self.get_dummy_inputs_by_type(lowerCamelCase_, input_image_type='pt')
        # Replace image inputs by their VAE latents (distribution mode).
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                _snake_case: Optional[Any] = vae.encode(inputs[image_param]).latent_dist.mode()
        _snake_case: Any = pipe(**lowerCamelCase_)[0]
        _snake_case: Union[str, Any] = np.abs(out - out_latents_inputs).max()
        self.assertLess(lowerCamelCase_, 1e-4, 'passing latents as image input generate different result from passing image')


@slow
@require_torch_gpu
class lowercase(unittest.TestCase):
    """Slow GPU integration tests against the real timbrooks/instruct-pix2pix
    checkpoint. Same NOTE(review) caveats as the class above apply."""

    def __UpperCAmelCase(self: Union[str, Any]):
        '''Free GPU memory between tests (upstream: tearDown).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase(self: Optional[int], lowerCamelCase_: int = 0):
        '''Build real-image call kwargs with a seeded generator.'''
        _snake_case: int = torch.manual_seed(lowerCamelCase_)
        _snake_case: str = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg'
        )
        _snake_case: Optional[Any] = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def __UpperCAmelCase(self: int):
        '''Default (PNDM) scheduler end-to-end slice check.'''
        _snake_case: Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_
        )
        pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing()
        _snake_case: Tuple = self.get_inputs()
        _snake_case: Optional[Any] = pipe(**lowerCamelCase_).images
        _snake_case: Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        _snake_case: Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def __UpperCAmelCase(self: Tuple):
        '''LMS scheduler end-to-end slice check.'''
        _snake_case: Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_
        )
        _snake_case: str = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing()
        _snake_case: Optional[int] = self.get_inputs()
        _snake_case: Tuple = pipe(**lowerCamelCase_).images
        _snake_case: int = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        _snake_case: List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def __UpperCAmelCase(self: Tuple):
        '''DDIM scheduler end-to-end slice check.'''
        _snake_case: Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_
        )
        _snake_case: Tuple = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing()
        _snake_case: List[Any] = self.get_inputs()
        _snake_case: str = pipe(**lowerCamelCase_).images
        _snake_case: List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        _snake_case: Dict = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def __UpperCAmelCase(self: Dict):
        '''Verify the step callback fires and intermediate latents match.'''
        _snake_case: List[str] = 0

        def callback_fn(lowerCamelCase_: int, lowerCamelCase_: int, lowerCamelCase_: torch.FloatTensor) -> None:
            _snake_case: List[Any] = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                _snake_case: Dict = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                _snake_case: List[str] = latents[0, -3:, -3:, -1]
                _snake_case: Union[str, Any] = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                _snake_case: str = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                _snake_case: List[str] = latents[0, -3:, -3:, -1]
                _snake_case: Dict = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        _snake_case: Dict = False
        _snake_case: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_, torch_dtype=torch.floataa
        )
        _snake_case: List[Any] = pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing()
        _snake_case: Optional[Any] = self.get_inputs()
        pipe(**lowerCamelCase_, callback=lowerCamelCase_, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __UpperCAmelCase(self: List[Any]):
        '''Sequential CPU offload must keep peak GPU memory under 2.2 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        _snake_case: Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_, torch_dtype=torch.floataa
        )
        _snake_case: List[Any] = pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        _snake_case: Dict = self.get_inputs()
        _snake_case: List[str] = pipe(**lowerCamelCase_)
        _snake_case: int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def __UpperCAmelCase(self: Any):
        '''Input resolutions divisible by 8 but not 16/32 must still work.'''
        _snake_case: Union[str, Any] = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        _snake_case: Optional[Any] = inputs['image'].resize((5_04, 5_04))
        _snake_case: Tuple = 'timbrooks/instruct-pix2pix'
        _snake_case: Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowerCamelCase_, safety_checker=lowerCamelCase_,
        )
        pipe.to(lowerCamelCase_)
        pipe.set_progress_bar_config(disable=lowerCamelCase_)
        pipe.enable_attention_slicing()
        _snake_case: Tuple = pipe(**lowerCamelCase_)
        _snake_case: str = output.images[0]
        _snake_case: Tuple = image[2_55:2_58, 3_83:3_86, -1]
        assert image.shape == (5_04, 5_04, 3)
        _snake_case: Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
652
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class lowercase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style token encoder.

    Embeds input tokens, adds frozen learned positional encodings, runs a
    stack of T5 encoder blocks, and applies a final layer norm + dropout.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        """Build embeddings and the T5 block stack from the config values."""
        super().__init__()
        # Token embedding plus a learned-but-frozen positional table.
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        # Reuse the transformers T5 block implementation via its config.
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            self.encoders.append(TaBlock(t5config))
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode *encoder_input_tokens*.

        Args:
            encoder_input_tokens: int tensor of shape (batch, seq).
            encoder_inputs_mask: attention mask of shape (batch, seq).

        Returns:
            ``(hidden_states, encoder_inputs_mask)`` — the encoded sequence
            and the (unchanged) input mask.
        """
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask: expand to the additive form T5 expects
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
652
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging


lowercase_ : str = logging.get_logger(__name__)

# Pretrained config archive map (upstream: VAN_PRETRAINED_CONFIG_ARCHIVE_MAP).
# NOTE(review): this rebinds the same module-level name as the logger above,
# preserved as-is from the original to avoid breaking any external reference.
lowercase_ : List[Any] = {
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}


class lowercase(PretrainedConfig):
    """Configuration class for VAN (Visual Attention Network) models.

    Stores the hyper-parameters of the four-stage VAN backbone; defaults
    correspond to the van-base architecture.
    """

    # ``model_type`` is required by PretrainedConfig for model registration.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        # Mutable list defaults mirror the upstream transformers config and
        # are treated as read-only.
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; extra kwargs go to the base."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
652
def A__(column_title: str) -> int:
    """Convert an Excel-style column title to its 1-based column number.

    Example: "A" -> 1, "AB" -> 28, "ZZ" -> 702. The title is interpreted
    as a base-26 number with digits A..Z (values 1..26), scanned from the
    least-significant (rightmost) character.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # ord('A') is 65, so ord(ch) - 64 maps A..Z to 1..26.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
652
1
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


lowercase_ : str = logging.get_logger(__name__)


class lowercase(SegformerImageProcessor):
    """Deprecated alias of :class:`SegformerImageProcessor`.

    Kept for backward compatibility only: instantiation emits a
    ``FutureWarning`` and then behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs):
        """Warn about the deprecation, then forward all arguments."""
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
652
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase_ : List[str] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowercase_ : Optional[int] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' lowercase_ : Any = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse('1.4.12' ): raise ImportWarning( 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n' 'You can install it with `pip install "sacrebleu>=1.4.12"`.' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[ 'https://github.com/jhclark/tercom', ] , ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' _snake_case : str = len(references[0] ) if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) _snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )] _snake_case : Optional[int] = TER( normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , ) _snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
652
1
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowercase ( unittest.TestCase ): """simple docstring""" _UpperCamelCase : int = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING _UpperCamelCase : int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = AudioClassificationPipeline(model=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) # test with a raw waveform _snake_case : List[Any] = np.zeros((3_40_00,) ) _snake_case : Optional[int] = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = examples _snake_case : Union[str, Any] = audio_classifier(lowerCamelCase_ ) # by default a model is initialized with num_labels=2 self.assertEqual( lowerCamelCase_ , [ {'score': ANY(lowerCamelCase_ ), 'label': ANY(lowerCamelCase_ )}, {'score': ANY(lowerCamelCase_ ), 'label': ANY(lowerCamelCase_ )}, ] , ) _snake_case : Dict = audio_classifier(lowerCamelCase_ , top_k=1 ) self.assertEqual( lowerCamelCase_ , [ {'score': ANY(lowerCamelCase_ ), 'label': ANY(lowerCamelCase_ )}, ] , ) self.run_torchaudio(lowerCamelCase_ ) @require_torchaudio def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[Any] ): '''simple docstring''' import datasets # test with a local file _snake_case : Dict = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , 
split='validation' ) _snake_case : Any = dataset[0]['audio']['array'] _snake_case : Optional[int] = audio_classifier(lowerCamelCase_ ) self.assertEqual( lowerCamelCase_ , [ {'score': ANY(lowerCamelCase_ ), 'label': ANY(lowerCamelCase_ )}, {'score': ANY(lowerCamelCase_ ), 'label': ANY(lowerCamelCase_ )}, ] , ) @require_torch def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : str = 'anton-l/wav2vec2-random-tiny-classifier' _snake_case : List[str] = pipeline('audio-classification' , model=lowerCamelCase_ ) _snake_case : Any = np.ones((80_00,) ) _snake_case : Any = audio_classifier(lowerCamelCase_ , top_k=4 ) _snake_case : Optional[Any] = [ {'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}, ] _snake_case : Any = [ {'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}, ] self.assertIn(nested_simplify(lowerCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) _snake_case : Optional[Any] = {'array': np.ones((80_00,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate} _snake_case : str = audio_classifier(lowerCamelCase_ , top_k=4 ) self.assertIn(nested_simplify(lowerCamelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' import datasets _snake_case : List[str] = 'superb/wav2vec2-base-superb-ks' _snake_case : Tuple = pipeline('audio-classification' , model=lowerCamelCase_ ) _snake_case : Optional[int] = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' ) _snake_case : Union[str, Any] = np.array(dataset[3]['speech'] , dtype=np.floataa ) _snake_case : Any = audio_classifier(lowerCamelCase_ , top_k=4 ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=3 ) , [ {'score': 0.981, 'label': 'go'}, {'score': 0.007, 
'label': 'up'}, {'score': 0.006, 'label': '_unknown_'}, {'score': 0.001, 'label': 'down'}, ] , ) @require_tf @unittest.skip('Audio classification is not implemented for TF' ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' pass
652
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowercase_ : Optional[int] = object() # For specifying empty leaf dict `{}` lowercase_ : List[Any] = object() def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Optional[Any] = tuple((re.compile(x + '$' ) for x in qs) ) for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ): _snake_case : Tuple = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , ks[i:] )] if matches and all(__lowerCAmelCase ): return True return False def A__( __lowerCAmelCase ): def replace(__lowerCAmelCase , __lowerCAmelCase ): for rule, replacement in rules: if _match(__lowerCAmelCase , __lowerCAmelCase ): return replacement return val return replace def A__( ): return [ # embeddings (("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )), (("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )), (("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )), (("mlp", "c_fc", "bias"), P('mp' )), (("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def A__( __lowerCAmelCase ): _snake_case : Optional[Any] = _get_partition_rules() _snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase ) _snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )} _snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCAmelCase ) )
652
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 lowercase_ : str = get_tests_dir('''fixtures''') class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Optional[Any] = mock.Mock() _snake_case : Dict = 5_00 _snake_case : Optional[int] = {} _snake_case : Optional[Any] = HTTPError _snake_case : str = {} # Download this model to make sure it's in the cache. _snake_case : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch('requests.Session.request' , return_value=lowerCamelCase_ ) as mock_head: _snake_case : Tuple = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' ) # This check we did call the fake head request mock_head.assert_called() def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = ViTImageProcessor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' with self.assertRaises(lowerCamelCase_ ): # config is in subfolder, the following should not work without specifying the subfolder _snake_case : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' ) _snake_case : List[str] = AutoImageProcessor.from_pretrained( 'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor' ) self.assertIsNotNone(lowerCamelCase_ ) @is_staging_test class lowercase ( unittest.TestCase ): """simple docstring""" @classmethod def __UpperCAmelCase ( cls : Tuple ): '''simple docstring''' _snake_case : Tuple = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def __UpperCAmelCase ( cls : List[str] ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='test-image-processor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-image-processor' ) except HTTPError: pass def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Any = ViTImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token ) _snake_case : int = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ , 
getattr(lowerCamelCase_ , lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( lowerCamelCase_ , repo_id='test-image-processor' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token ) _snake_case : Any = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = ViTImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token ) _snake_case : Any = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-image-processor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( lowerCamelCase_ , repo_id='valid_org/test-image-processor-org' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token ) _snake_case : Optional[int] = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _snake_case : Dict = CustomImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'AutoImageProcessor': 
'custom_image_processing.CustomImageProcessor'} , ) _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor' )
652
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowercase_ : Any = logging.getLogger(__name__) def A__( __lowerCAmelCase , __lowerCAmelCase ): # save results if os.path.exists(__lowerCAmelCase ): if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile( os.path.join(__lowerCAmelCase , 'config.json' ) ): os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) ) if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ): os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) else: os.makedirs(__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) def A__( __lowerCAmelCase , __lowerCAmelCase=False ): _snake_case : int = 2 if unlogit: _snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase ) _snake_case : Optional[int] = p * torch.log(__lowerCAmelCase ) _snake_case : List[str] = 0 return -plogp.sum(dim=-1 ) def A__( __lowerCAmelCase ): logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) ) for row in range(len(__lowerCAmelCase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ): _snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device ) _snake_case : Union[str, Any] = 
torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device ) if head_mask is None: _snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowerCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : List[str] = None _snake_case : str = 0.0 _snake_case : List[str] = 0.0 for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : Dict = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : int = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowerCAmelCase ): _snake_case : int = entropy(attn.detach() , __lowerCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : List[str] = 2 _snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _snake_case : Union[str, Any] = (head_importance - head_importance.min()) / 
(head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(__lowerCAmelCase ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(__lowerCAmelCase ) logger.info('Head ranked by importance scores' ) _snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : Optional[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase ) print_ad_tensor(__lowerCAmelCase ) return attn_entropy, head_importance, total_loss def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase ) _snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold ) _snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase ) _snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : Dict = original_score while current_score >= original_score * args.masking_threshold: _snake_case : List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Optional[Any] = float('Inf' ) _snake_case : Optional[int] = head_importance.view(-1 ).sort()[1] if len(__lowerCAmelCase ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads _snake_case : List[Any] = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 ) _snake_case : List[str] = 0.0 _snake_case : int = new_head_mask.view_as(__lowerCAmelCase ) _snake_case : Optional[Any] = 
new_head_mask.clone().detach() print_ad_tensor(__lowerCAmelCase ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase ) _snake_case : Union[str, Any] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info('Final head mask' ) print_ad_tensor(__lowerCAmelCase ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _snake_case : List[Any] = datetime.now() _snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase ) _snake_case : str = 1 / loss _snake_case : Optional[int] = datetime.now() - before_time _snake_case : Optional[int] = sum(p.numel() for p in model.parameters() ) _snake_case : Optional[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _snake_case : Optional[int] = [ v, ] assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowerCAmelCase ) _snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : List[str] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , 
head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , ) _snake_case : Dict = 1 / loss _snake_case : str = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 ) save_model(__lowerCAmelCase , args.output_dir ) def A__( ): _snake_case : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' 
) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' ) parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 ) parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' 
) _snake_case : List[str] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) _snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank ) _snake_case : Dict = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case : List[str] = nn.parallel.DistributedDataParallel( __lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase ) elif args.n_gpu > 1: _snake_case : int = nn.DataParallel(__lowerCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , __lowerCAmelCase ) # Prepare dataset _snake_case : str = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),) _snake_case : List[Any] = TensorDataset(*__lowerCAmelCase ) _snake_case : 
List[str] = RandomSampler(__lowerCAmelCase ) _snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
652
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
652
def A__( __lowerCAmelCase ):
    """Return the largest number obtainable by removing exactly one digit from
    the absolute value of the given integer.

    Args:
        __lowerCAmelCase: integer to strip one digit from (the sign is discarded).

    Returns:
        int: the maximum value over all single-digit removals.

    Raises:
        TypeError: if the input is not an ``int``.

    >>> A__(152)
    52
    >>> A__(649)
    69
    """
    # Bug fix: the original tested `isinstance(value, value)`, which raises
    # "isinstance() arg 2 must be a type" for every call instead of validating.
    if not isinstance(__lowerCAmelCase, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(__lowerCAmelCase))
    # One candidate per digit position: copy the digit list, then drop position i.
    # Bug fix: the original popped an undefined value and reused one `_snake_case`
    # temporary for every intermediate, so no candidate list was ever built.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int(''.join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__('''doctest''').testmod()
652
1
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


# Base import structure: the configuration symbols are always importable.
lowercase_ : Tuple = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}

# Tokenizer objects are only exposed when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream stores this list inside the import-structure dict;
    # here it is bound to a bare module-level name — looks mangled, verify.
    lowercase_ : int = ['''GPTNeoXTokenizerFast''']

# Torch modelling objects are only exposed when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Union[str, Any] = [
        '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoXForCausalLM''',
        '''GPTNeoXForQuestionAnswering''',
        '''GPTNeoXForSequenceClassification''',
        '''GPTNeoXForTokenClassification''',
        '''GPTNeoXLayer''',
        '''GPTNeoXModel''',
        '''GPTNeoXPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers (and IDEs) see the real imports directly.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader so heavy deps are
    # imported on first attribute access only.
    # NOTE(review): `_import_structure` is referenced but never defined above
    # (the dict was bound to `lowercase_`) — looks mangled, verify against upstream.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask


# NOTE(review): throughout this file the obfuscation replaced every assignment
# target with a `_snake_case` placeholder and most call arguments with
# `lowerCamelCase_`, and several methods declare `lowerCamelCase_` twice in one
# signature (a SyntaxError). The structure matches transformers'
# examples token-classification `tasks.py`; verify each placeholder against it.
lowercase_ : Tuple = logging.getLogger(__name__)


class lowercase ( a_ ):
    """CoNLL-style NER task: one token per line, label in column `label_idx`."""

    def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
        '''Remember which whitespace-separated column holds the label (default: last).'''
        # NOTE(review): reads undefined `label_idx` and drops the `self.` target — mangled.
        _snake_case : str = label_idx

    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` from a data dir and build one InputExample per sentence.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : Union[str, Any] = mode.value
        _snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Dict = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            _snake_case : List[Any] = []
            _snake_case : int = []
            for line in f:
                # Blank lines and -DOCSTART- markers terminate the current sentence.
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                        guid_index += 1
                        _snake_case : Optional[Any] = []
                        _snake_case : Union[str, Any] = []
                else:
                    _snake_case : Tuple = line.split(' ' )
                    words.append(splits[0] )
                    if len(lowerCamelCase_ ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            # Flush a trailing sentence not followed by a blank line.
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
        return examples

    def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write predictions next to the original tokens, one token per line.'''
        _snake_case : str = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(lowerCamelCase_ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                # Emit "<token> <predicted-label>" for the next token of the current example.
                _snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(lowerCamelCase_ )
            else:
                # Tokens past the model's max sequence length have no prediction.
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
        '''Load labels from a file (one per line), or fall back to the CoNLL-2003 NER set.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : Optional[int] = f.read().splitlines()
            if "O" not in labels:
                # "O" (outside) must always be present for IOB tagging.
                _snake_case : Optional[int] = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class lowercase ( a_ ):
    """Chunking task: same file format as NER, but the label lives in column -2."""

    def __init__( self : Optional[int] ):
        '''Use the second-to-last column as the label column.'''
        super().__init__(label_idx=-2 )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
        '''Load labels from a file, or fall back to the CoNLL-2000 chunking set.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                _snake_case : str = f.read().splitlines()
            if "O" not in labels:
                _snake_case : Union[str, Any] = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class lowercase ( a_ ):
    """POS-tagging task over CoNLL-U files parsed with the `conllu` package."""

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
        '''Read `{mode}.txt` in CoNLL-U format; words come from `form`, labels from `upos`.'''
        if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
            _snake_case : str = mode.value
        _snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
        _snake_case : Tuple = 1
        _snake_case : List[str] = []
        with open(lowerCamelCase_ , encoding='utf-8' ) as f:
            for sentence in parse_incr(lowerCamelCase_ ):
                _snake_case : List[str] = []
                _snake_case : str = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
                    guid_index += 1
        return examples

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
        '''Write one "form (upos|prediction)" triple per token, one sentence per line.'''
        _snake_case : Dict = 0
        for sentence in parse_incr(lowerCamelCase_ ):
            _snake_case : Optional[int] = preds_list[example_id]
            _snake_case : List[Any] = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(lowerCamelCase_ )
            example_id += 1

    def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
        '''Load labels from a file, or fall back to the 17 Universal POS tags.'''
        if path:
            with open(lowerCamelCase_ , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
652
1
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# NOTE(review): throughout this file assignment targets were mangled to
# `_snake_case` and most call arguments to `lowerCamelCase_`; the __init__
# signature also repeats the name `lowerCamelCase_` (a SyntaxError). The
# structure matches transformers' configuration_conditional_detr.py — verify
# each placeholder against it.
lowercase_ : Dict = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config file.
lowercase_ : Tuple = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}


class lowercase ( a_ ):
    """Configuration for the Conditional DETR object-detection model."""

    # Model-type tag used by the auto classes.
    _UpperCamelCase : Tuple = "conditional_detr"
    # Keys ignored at inference time.
    _UpperCamelCase : List[str] = ["past_key_values"]
    # Aliases from common config attribute names to this model's names.
    _UpperCamelCase : int = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self : Union[str, Any] , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : List[str]=3_00 , lowerCamelCase_ : int=6 , lowerCamelCase_ : Optional[Any]=20_48 , lowerCamelCase_ : Any=8 , lowerCamelCase_ : Tuple=6 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Dict=8 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Optional[int]="relu" , lowerCamelCase_ : Dict=2_56 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : List[Any]=0.02 , lowerCamelCase_ : Union[str, Any]=1.0 , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : str="sine" , lowerCamelCase_ : Dict="resnet50" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=5 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Union[str, Any]=1 , lowerCamelCase_ : str=1 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[Any]=5 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : str=0.25 , **lowerCamelCase_ : Dict , ):
        '''Store transformer sizes, backbone choice, matcher costs and loss weights.'''
        # The two backbone mechanisms (timm vs. HF config) are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )

        if not use_timm_backbone:
            if backbone_config is None:
                # Default to a ResNet backbone exposing its last stage.
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                _snake_case : str = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                # A plain dict was passed: resolve its model_type to a config class.
                _snake_case : List[Any] = backbone_config.get('model_type' )
                _snake_case : str = CONFIG_MAPPING[backbone_model_type]
                _snake_case : str = config_class.from_dict(lowerCamelCase_ )
        _snake_case : int = use_timm_backbone
        _snake_case : Optional[int] = backbone_config
        _snake_case : Tuple = num_channels
        _snake_case : List[str] = num_queries
        _snake_case : List[str] = d_model
        _snake_case : Dict = encoder_ffn_dim
        _snake_case : Union[str, Any] = encoder_layers
        _snake_case : Union[str, Any] = encoder_attention_heads
        _snake_case : Dict = decoder_ffn_dim
        _snake_case : Any = decoder_layers
        _snake_case : List[str] = decoder_attention_heads
        _snake_case : Any = dropout
        _snake_case : str = attention_dropout
        _snake_case : List[Any] = activation_dropout
        _snake_case : List[str] = activation_function
        _snake_case : Any = init_std
        _snake_case : int = init_xavier_std
        _snake_case : str = encoder_layerdrop
        _snake_case : Tuple = decoder_layerdrop
        _snake_case : Optional[Any] = encoder_layers
        _snake_case : Optional[Any] = auxiliary_loss
        _snake_case : Any = position_embedding_type
        _snake_case : Optional[Any] = backbone
        _snake_case : List[Any] = use_pretrained_backbone
        _snake_case : List[str] = dilation
        # Hungarian matcher
        _snake_case : List[Any] = class_cost
        _snake_case : int = bbox_cost
        _snake_case : Optional[int] = giou_cost
        # Loss coefficients
        _snake_case : Any = mask_loss_coefficient
        _snake_case : str = dice_loss_coefficient
        _snake_case : Dict = cls_loss_coefficient
        _snake_case : str = bbox_loss_coefficient
        _snake_case : Any = giou_loss_coefficient
        _snake_case : Optional[Any] = focal_alpha
        super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )

    @property
    def __UpperCAmelCase ( self : Dict ):
        '''Alias: number of attention heads (common-config name).'''
        return self.encoder_attention_heads

    @property
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Alias: hidden size (common-config name).'''
        return self.d_model

    def __UpperCAmelCase ( self : List[Any] ):
        '''Serialise to a plain dict, expanding the nested backbone config.'''
        _snake_case : int = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            _snake_case : Optional[int] = self.backbone_config.to_dict()
        _snake_case : Union[str, Any] = self.__class__.model_type
        return output


class lowercase ( a_ ):
    """ONNX export configuration for Conditional DETR."""

    # Minimum torch version known to export this model correctly.
    _UpperCamelCase : Optional[int] = version.parse("1.11" )

    @property
    def __UpperCAmelCase ( self : int ):
        '''Dynamic-axis spec for the exported graph inputs.'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1e-5

    @property
    def __UpperCAmelCase ( self : List[str] ):
        '''Default ONNX opset.'''
        return 12
652
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


# NOTE(review): throughout this file the obfuscation replaced assignment targets
# with `_snake_case` placeholders and keyword-argument values with
# `lowerCamelCase_`, and one nested signature repeats a varargs name (a
# SyntaxError). The structure matches diffusers' test_stable_diffusion_safe.py —
# verify each placeholder against it.
class lowercase ( unittest.TestCase ):
    """Fast tests for the safe Stable Diffusion pipeline using tiny dummy models."""

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Release Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def __UpperCAmelCase ( self : Optional[int] ):
        '''Seeded random 1x3x32x32 image tensor fixture.'''
        _snake_case : Tuple = 1
        _snake_case : str = 3
        _snake_case : List[str] = (32, 32)

        _snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
        return image

    @property
    def __UpperCAmelCase ( self : str ):
        '''Tiny seeded UNet2DConditionModel fixture.'''
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model

    @property
    def __UpperCAmelCase ( self : Dict ):
        '''Tiny seeded AutoencoderKL fixture.'''
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    @property
    def __UpperCAmelCase ( self : Tuple ):
        '''Tiny seeded CLIP text-encoder fixture.'''
        torch.manual_seed(0 )
        _snake_case : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(lowerCamelCase_ )

    @property
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''Stand-in feature extractor: callable returning an object with empty pixel_values.'''

        def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
            class lowercase :
                """Minimal stand-in for a feature-extractor output."""

                def __init__( self : Tuple ):
                    '''Start with an empty pixel_values tensor.'''
                    _snake_case : List[str] = torch.ones([0] )

                def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
                    '''Mimic `.to(device)`; returns self.'''
                    self.pixel_values.to(lowerCamelCase_ )
                    return self

            return Out()

        return extract

    def __UpperCAmelCase ( self : int ):
        '''End-to-end dummy run with DDIM; compares an output slice to a reference.'''
        _snake_case : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : int = self.dummy_cond_unet
        _snake_case : str = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
        _snake_case : Union[str, Any] = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        # make sure here that pndm scheduler skips prk
        _snake_case : Union[str, Any] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : str = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : List[str] = 'A painting of a squirrel eating a burger'

        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Union[str, Any] = output.images

        # Same call via the tuple return path must give identical results.
        _snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]

        _snake_case : Tuple = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        _snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : List[str] ):
        '''End-to-end dummy run with PNDM; compares an output slice to a reference.'''
        _snake_case : Tuple = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case : List[str] = self.dummy_cond_unet
        _snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : int = self.dummy_vae
        _snake_case : List[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        # make sure here that pndm scheduler skips prk
        _snake_case : Any = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : str = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        _snake_case : Optional[Any] = output.images

        _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]

        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Any = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        _snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        '''Pipeline must work, save and reload with safety_checker disabled.'''
        _snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
        assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
        assert isinstance(pipe.scheduler , lowerCamelCase_ )
        assert pipe.safety_checker is None

        _snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            _snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        _snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''The pipeline must run end to end with all models cast to fp16.'''
        _snake_case : Union[str, Any] = self.dummy_cond_unet
        _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
        _snake_case : Any = self.dummy_vae
        _snake_case : Optional[Any] = self.dummy_text_encoder
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        # put models in fp16
        _snake_case : str = unet.half()
        _snake_case : Union[str, Any] = vae.half()
        _snake_case : Dict = bert.half()

        # make sure here that pndm scheduler skips prk
        _snake_case : List[str] = StableDiffusionPipeline(
            unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
        _snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : Tuple = 'A painting of a squirrel eating a burger'
        _snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    """Nightly GPU tests exercising safe-latent-diffusion (SLD) guidance on SD v1.5."""

    def __UpperCAmelCase ( self : Tuple ):
        '''Release Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : List[str] ):
        '''Compare output slices with SLD disabled vs. a strong SLD configuration.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : Optional[int] = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        _snake_case : List[str] = 40_03_66_03_46
        _snake_case : int = 7

        # without safety guidance (sld_guidance_scale = 0)
        _snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Union[str, Any] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : str = output.images
        _snake_case : Dict = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

        # without safety guidance (strong configuration)
        _snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
        _snake_case : int = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : Tuple = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : int ):
        '''Same comparison on a second prompt/seed pair.'''
        _snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
        _snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        _snake_case : Any = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
        _snake_case : Optional[Any] = 27_34_97_17_55
        _snake_case : Union[str, Any] = 7

        _snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Tuple = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Any = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

        _snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCAmelCase ( self : Tuple ):
        '''With the default scheduler: SLD off gives a black (NSFW-filtered) image, SLD on does not.'''
        _snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        _snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        _snake_case : List[Any] = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        _snake_case : Union[str, Any] = 10_44_35_52_34
        _snake_case : Dict = 12

        _snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Any = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
        _snake_case : Optional[int] = output.images
        _snake_case : int = image[0, -3:, -3:, -1]
        _snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7

        _snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
        _snake_case : Optional[int] = sd_pipe(
            [prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        _snake_case : str = output.images
        _snake_case : List[str] = image[0, -3:, -3:, -1]
        _snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )

        assert image.shape == (1, 5_12, 5_12, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
652
1
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


# NOTE(review): the obfuscation replaced assignment targets with `_snake_case`
# placeholders and most positional arguments with `lowerCamelCase_`; the
# structure matches transformers' test_backbone_utils.py — verify placeholders
# against it.
class lowercase ( unittest.TestCase ):
    """Unit tests for the backbone out_features / out_indices alignment helpers."""

    def __UpperCAmelCase ( self : int ):
        '''get_aligned_output_features_output_indices must fill in whichever side is missing.'''
        _snake_case : str = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        _snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , ['c'] )
        self.assertEqual(lowerCamelCase_ , [2] )

        # Out indices set to match out features
        _snake_case , _snake_case : Optional[Any] = get_aligned_output_features_output_indices(['a', 'c'] , lowerCamelCase_ , lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , ['a', 'c'] )
        self.assertEqual(lowerCamelCase_ , [0, 2] )

        # Out features set to match out indices
        _snake_case , _snake_case : Optional[int] = get_aligned_output_features_output_indices(lowerCamelCase_ , [0, 2] , lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , ['a', 'c'] )
        self.assertEqual(lowerCamelCase_ , [0, 2] )

        # Out features selected from negative indices
        _snake_case , _snake_case : str = get_aligned_output_features_output_indices(lowerCamelCase_ , [-3, -1] , lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , ['a', 'c'] )
        self.assertEqual(lowerCamelCase_ , [-3, -1] )

    def __UpperCAmelCase ( self : Optional[Any] ):
        '''verify_out_features_out_indices must reject every malformed combination.'''
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCamelCase_ )

        # Out features must be a list
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )

        # Out features must be a subset of stage names
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )

        # Out indices must be a list or tuple
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(lowerCamelCase_ , 0 , ['a', 'b'] )

        # Out indices must be a subset of stage names
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(lowerCamelCase_ , (0, 1) , ['a'] )

        # Out features and out indices must be the same length
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )

        # Out features should match out indices
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )

        # Out features and out indices should be in order
        with self.assertRaises(lowerCamelCase_ ):
            verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )

        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )

    def __UpperCAmelCase ( self : Union[str, Any] ):
        '''BackboneMixin must keep out_features and out_indices mutually consistent.'''
        _snake_case : Tuple = BackboneMixin()

        _snake_case : Dict = ['a', 'b', 'c']
        _snake_case : Tuple = ['a', 'c']
        _snake_case : Optional[Any] = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [0, 2] )

        # Check out features and indices are updated correctly
        _snake_case : Optional[Any] = ['a', 'b']
        self.assertEqual(backbone.out_features , ['a', 'b'] )
        self.assertEqual(backbone.out_indices , [0, 1] )

        _snake_case : str = [-3, -1]
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
652
import functools


def A__( days , costs ):
    """Minimum total cost to travel on every day listed in ``days``.

    ``costs`` holds the prices of a 1-day, 7-day and 30-day pass, in that
    order. Days are integers in [1, 365].

    Args:
        days: list of travel days (each an int, 1 <= day <= 365).
        costs: list of exactly three pass prices ``[one_day, seven_day, thirty_day]``.

    Returns:
        int: the minimum total cost covering all travel days (0 for no days).

    Raises:
        ValueError: on malformed ``days`` or ``costs``, or out-of-range days.

    >>> A__([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    """
    # Bug fix: the original declared two parameters with the same mangled name
    # (a SyntaxError) and validated with `isinstance(x, x)`, which raises
    # TypeError for every input instead of checking types.
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    # Set membership makes the per-day lookup O(1).
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` to the year's end.
        if index > 365:
            return 0
        if index not in days_set:
            # No travel today: nothing to pay, move on.
            return dynamic_programming(index + 1)
        # Travel day: buy the cheapest of the three passes starting today.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
def A__(number, iterations):
    """Play FizzBuzz from *number* up to and including *iterations*.

    Returns one string in which each term ("Fizz", "Buzz", "FizzBuzz" or the
    number itself) is followed by a single space.

    Raises:
        ValueError: if either argument is not an integer, or is below 1.
    """
    # BUGFIX: the original signature declared the same parameter name twice
    # (`__lowerCAmelCase`), which is a SyntaxError in Python; restored the
    # conventional (number, iterations) names.  All error-message strings are
    # preserved byte-for-byte.
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            # Divisible by neither 3 nor 5: emit the number itself.
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor

# Module-level logger, per transformers convention.
lowercase_ : str = logging.get_logger(__name__)


class lowercase ( a_ ):
    """Deprecated backward-compatibility shim.

    Thin subclass that emits a deprecation warning on construction and
    otherwise defers entirely to its base class (presumably
    SegformerImageProcessor — the base is obfuscated as ``a_``; confirm
    against upstream).
    """

    def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
        '''Warn that this class is deprecated, then delegate to the base class.'''
        # NOTE(review): the second positional argument to warnings.warn should
        # be a warning category (upstream passes FutureWarning); here it is the
        # *args tuple — looks like an obfuscation artifact.  TODO confirm.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , )
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
652
1
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowercase : """simple docstring""" def __init__( self : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=6 , lowerCamelCase_ : Union[str, Any]=17 , lowerCamelCase_ : Union[str, Any]=23 , lowerCamelCase_ : Tuple=11 , lowerCamelCase_ : Optional[Any]=True , ): '''simple docstring''' _snake_case : List[Any] = parent _snake_case : Union[str, Any] = batch_size _snake_case : int = seq_length _snake_case : Dict = act_dim _snake_case : Union[str, Any] = state_dim _snake_case : Optional[int] = hidden_size _snake_case : List[Any] = max_length _snake_case : Union[str, Any] = is_training def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) _snake_case : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) _snake_case : List[Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) _snake_case : Any = floats_tensor((self.batch_size, self.seq_length, 1) ) _snake_case : List[str] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 ) _snake_case : int = random_attention_mask((self.batch_size, self.seq_length) ) _snake_case : Union[str, Any] = self.get_config() return ( config, states, actions, 
rewards, returns_to_go, timesteps, attention_mask, ) def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , ): '''simple docstring''' _snake_case : str = DecisionTransformerModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _snake_case : str = model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : int = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Union[str, Any] = config_and_inputs _snake_case : List[Any] = { 'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask, } return config, inputs_dict @require_torch class lowercase ( a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Any = (DecisionTransformerModel,) if is_torch_available() else () _UpperCamelCase : Union[str, Any] = () _UpperCamelCase : 
Optional[int] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids _UpperCamelCase : int = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features _UpperCamelCase : Dict = False _UpperCamelCase : str = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : str = False _UpperCamelCase : Optional[int] = False _UpperCamelCase : Tuple = False _UpperCamelCase : str = False _UpperCamelCase : List[Any] = False _UpperCamelCase : List[Any] = False def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Union[str, Any] = DecisionTransformerModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def __UpperCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) @slow def __UpperCAmelCase ( self : Any ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Dict = DecisionTransformerModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[Any] = model_class(lowerCamelCase_ ) _snake_case : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : Optional[Any] = [*signature.parameters.keys()] _snake_case : Optional[Any] = [ 'states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask', ] 
self.assertListEqual(arg_names[: len(lowerCamelCase_ )] , lowerCamelCase_ ) @require_torch class lowercase ( unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Union[str, Any] = 2 # number of steps of autoregressive prediction we will perform _snake_case : str = 10 # defined by the RL environment, may be normalized _snake_case : Optional[Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' ) _snake_case : int = model.to(lowerCamelCase_ ) _snake_case : Any = model.config torch.manual_seed(0 ) _snake_case : str = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase_ , dtype=torch.floataa ) # env.reset() _snake_case : int = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=lowerCamelCase_ ) _snake_case : Optional[int] = torch.tensor(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) _snake_case : Union[str, Any] = state _snake_case : int = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase_ , dtype=torch.floataa ) _snake_case : int = torch.zeros(1 , 0 , device=lowerCamelCase_ , dtype=torch.floataa ) _snake_case : Optional[int] = torch.tensor(0 , device=lowerCamelCase_ , dtype=torch.long ).reshape(1 , 1 ) for step in range(lowerCamelCase_ ): _snake_case : int = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase_ )] , dim=1 ) _snake_case : Any = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase_ )] , dim=1 ) _snake_case : int = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): _snake_case , _snake_case , _snake_case : Optional[Any] = model( states=lowerCamelCase_ , actions=lowerCamelCase_ , rewards=lowerCamelCase_ , returns_to_go=lowerCamelCase_ , timesteps=lowerCamelCase_ , attention_mask=lowerCamelCase_ , return_dict=lowerCamelCase_ , ) 
self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase_ , dtype=torch.floataa ), 1.0, False, {}, ) _snake_case : Union[str, Any] = action_pred[0, -1] _snake_case : List[Any] = torch.cat([states, state] , dim=1 ) _snake_case : Dict = returns_to_go[0, -1] - reward _snake_case : str = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) _snake_case : Union[str, Any] = torch.cat( [timesteps, torch.ones((1, 1) , device=lowerCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
652
from math import factorial


def A__(successes, trials, prob):
    """Binomial probability mass function.

    Return the probability of observing exactly *successes* successes in
    *trials* independent Bernoulli trials, each succeeding with probability
    *prob*.

    Raises:
        ValueError: if successes > trials, either count is negative or not an
            integer, or prob is outside the open interval (0, 1).
    """
    # BUGFIX: the original signature declared the same parameter name three
    # times (`__lowerCAmelCase`), a SyntaxError; names restored from usage
    # (the demo call below passes (2, 4, 0.75)).  Messages kept byte-for-byte.
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')

    # p^k * (1-p)^(n-k): probability of one specific success/failure sequence.
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n-k)!) — number of such sequences.
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # BUGFIX: this demo previously called `binomial_distribution`, a name that
    # does not exist in this file (NameError when run as a script).
    print(A__(2, 4, 0.75))
652
1
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class lowercase ( a_ , a_ , a_ ):
    """T5-style encoder stack.

    Embeds input tokens plus learned absolute positions, applies dropout, runs
    the result through a stack of ``TaBlock`` layers, layer-normalises the
    output and returns it together with the encoder input mask.

    NOTE(review): this file is machine-obfuscated — every ``__init__``
    parameter shares the name ``lowerCamelCase_`` (duplicate argument names
    are a SyntaxError) and every local binds to ``_snake_case``, so attributes
    such as ``token_embedder`` read in the forward pass are never actually
    assigned here.  Comments below describe the evident intent; confirm
    against the un-obfuscated upstream source.
    """

    @register_to_config
    def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
        '''Build the embeddings, the TaBlock stack, and the output norm/dropout.'''
        super().__init__()
        # Token embedding table (presumably vocab_size x d_model — confirm).
        _snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        # Learned absolute position embedding (presumably max_length x d_model).
        _snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
        _snake_case : Union[str, Any] = False
        # Dropout applied to the summed embeddings before the encoder stack.
        _snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
        # Encoder-only T5 configuration shared by all blocks.
        _snake_case : Union[str, Any] = TaConfig(
            vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
        # Stack of transformer encoder blocks.
        _snake_case : Union[str, Any] = nn.ModuleList()
        for lyr_num in range(lowerCamelCase_ ):
            _snake_case : Any = TaBlock(lowerCamelCase_ )
            self.encoders.append(lowerCamelCase_ )
        # Final T5 (RMS-style) layer norm and post-stack dropout.
        _snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
        _snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )

    def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
        '''Encode a batch of token ids; return (hidden_states, input_mask).'''
        # Token embedding lookup.
        _snake_case : Any = self.token_embedder(lowerCamelCase_ )
        # Sequence length — assumes tokens are (batch, seq); TODO confirm.
        _snake_case : List[Any] = encoder_input_tokens.shape[1]
        _snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
        # Add learned position encodings in place.
        x += self.position_encoding(lowerCamelCase_ )
        _snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )

        # inverted the attention mask
        _snake_case : Dict = encoder_input_tokens.size()
        # Broadcastable extended attention mask from ModuleUtilsMixin.
        _snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )

        # Run the block stack; each TaBlock returns a tuple whose position 0
        # holds the hidden states.
        for lyr in self.encoders:
            _snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
        _snake_case : Any = self.layer_norm(lowerCamelCase_ )

        return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
652
# Boilerplate injected at the top of auto-generated Italian documentation
# notebooks: a code cell that installs the required packages.
lowercase_ : Tuple = ''' # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git '''

# First cell of every generated notebook: the install snippet above.
# NOTE(review): this references INSTALL_CONTENT, but the constant above was
# renamed to `lowercase_` by obfuscation — NameError as written; confirm the
# original constant name upstream.
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholder -> dummy-value substitutions applied when rendering doc
# templates outside a real model context.
lowercase_ : str = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
652
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=4_00 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : str=None , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=1 / 2_55 , lowerCamelCase_ : List[Any]=True , ): '''simple docstring''' _snake_case : Tuple = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} _snake_case : Union[str, Any] = parent _snake_case : Optional[Any] = batch_size _snake_case : Any = num_channels _snake_case : int = min_resolution _snake_case : Dict = max_resolution _snake_case : List[str] = do_resize _snake_case : str = size _snake_case : Optional[Any] = do_normalize _snake_case : Dict = image_mean _snake_case : Optional[Any] = image_std _snake_case : Union[str, Any] = do_rescale _snake_case : Union[str, Any] = rescale_factor _snake_case : List[str] = do_pad def __UpperCAmelCase ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : 
Tuple=False ): '''simple docstring''' if not batched: _snake_case : Optional[Any] = image_inputs[0] if isinstance(lowerCamelCase_ , Image.Image ): _snake_case , _snake_case : Tuple = image.size else: _snake_case , _snake_case : List[str] = image.shape[1], image.shape[2] if w < h: _snake_case : Tuple = int(self.size['shortest_edge'] * h / w ) _snake_case : Optional[Any] = self.size['shortest_edge'] elif w > h: _snake_case : Union[str, Any] = self.size['shortest_edge'] _snake_case : Optional[int] = int(self.size['shortest_edge'] * w / h ) else: _snake_case : int = self.size['shortest_edge'] _snake_case : int = self.size['shortest_edge'] else: _snake_case : List[str] = [] for image in image_inputs: _snake_case , _snake_case : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _snake_case : Dict = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[0] )[0] _snake_case : Optional[Any] = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase ( a_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Dict = DetaImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Optional[int] = DetaImageProcessingTester(self ) @property def __UpperCAmelCase ( self : str ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , 'image_mean' ) ) self.assertTrue(hasattr(lowerCamelCase_ , 'image_std' ) ) self.assertTrue(hasattr(lowerCamelCase_ , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCamelCase_ , 'do_resize' ) ) self.assertTrue(hasattr(lowerCamelCase_ , 'do_rescale' ) ) self.assertTrue(hasattr(lowerCamelCase_ , 'do_pad' ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , 'size' ) ) def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} ) self.assertEqual(image_processor.do_pad , lowerCamelCase_ ) def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input _snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case , _snake_case : List[str] = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) _snake_case : List[str] = image_processing(lowerCamelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input _snake_case : str = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case : Optional[int] = image_processing(lowerCamelCase_ , return_tensors='pt' ).pixel_values _snake_case , _snake_case : Any = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input _snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _snake_case , _snake_case : str = self.image_processor_tester.get_expected_values(lowerCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _snake_case : Optional[int] = image_processing(lowerCamelCase_ , return_tensors='pt' ).pixel_values _snake_case , _snake_case : int = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCAmelCase ( self : str ): '''simple docstring''' _snake_case : Optional[int] = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _snake_case : str = json.loads(f.read() ) _snake_case : List[str] = {'image_id': 3_97_69, 'annotations': target} # encode them _snake_case : Union[str, Any] = DetaImageProcessor() _snake_case : Optional[Any] = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , return_tensors='pt' ) # verify pixel values _snake_case : int = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , lowerCamelCase_ ) _snake_case : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCamelCase_ , atol=1e-4 ) ) # verify area _snake_case : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCamelCase_ ) ) # verify boxes _snake_case : int = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCamelCase_ ) _snake_case : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCamelCase_ , atol=1e-3 ) ) # verify image_id _snake_case : Any = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCamelCase_ ) ) # verify is_crowd _snake_case : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCamelCase_ ) ) # verify class_labels _snake_case : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCamelCase_ ) ) # verify orig_size _snake_case : Optional[Any] = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCamelCase_ ) ) # verify size _snake_case : List[Any] = 
torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCamelCase_ ) ) @slow def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _snake_case : Union[str, Any] = json.loads(f.read() ) _snake_case : int = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} _snake_case : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _snake_case : Optional[int] = DetaImageProcessor(format='coco_panoptic' ) _snake_case : Any = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , masks_path=lowerCamelCase_ , return_tensors='pt' ) # verify pixel values _snake_case : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , lowerCamelCase_ ) _snake_case : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCamelCase_ , atol=1e-4 ) ) # verify area _snake_case : Dict = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCamelCase_ ) ) # verify boxes _snake_case : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCamelCase_ ) _snake_case : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCamelCase_ , atol=1e-3 ) ) # verify image_id _snake_case : List[str] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCamelCase_ ) ) # verify is_crowd _snake_case : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCamelCase_ ) ) # verify 
class_labels _snake_case : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCamelCase_ ) ) # verify masks _snake_case : Any = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCamelCase_ ) # verify orig_size _snake_case : Dict = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCamelCase_ ) ) # verify size _snake_case : Dict = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCamelCase_ ) )
652
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import table: submodule name -> list of public symbols it provides.
# BUGFIX: this dict is consumed as `_import_structure` by _LazyModule at the
# bottom of the file, but was previously bound only to `lowercase_` (and the
# torch branch clobbered even that) -> NameError on import.
_import_structure = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
# Keep the previously published module-level alias for any external readers.
lowercase_ : Optional[Any] = _import_structure

# RoCBert ships no fast tokenizer; the block is kept so the layout matches
# other model packages.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

# Torch modeling symbols are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_roc_bert'''] = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]

# Static imports for type checkers; at runtime submodules load lazily.
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUGFIX: this branch previously raised OptionalDependencyNotAvailable()
        # unconditionally — i.e. the import aborted exactly when `tokenizers`
        # WAS installed.  There is no fast tokenizer to import, so the success
        # path is a no-op.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # Replace eager imports with a lazy proxy over _import_structure.
    lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
def A__(num):
    """Return the largest number obtainable by deleting exactly one digit
    from the absolute value of *num*.

    Raises:
        TypeError: if *num* is not an integer.
    """
    # BUGFIX: every name in the original was obfuscated to `__lowerCAmelCase`,
    # so `int(''.join(list(__lowerCAmelCase)))` joined the wrong value and the
    # candidate/loop variables collided; restored distinct names.
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    # One candidate per digit position: a copy of the digit list with that
    # position removed.
    candidates = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        candidates[index].pop(index)
    return max(int(''.join(candidate)) for candidate in candidates)


if __name__ == "__main__":
    __import__('''doctest''').testmod()
652
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import table: submodule name -> list of public symbols it provides.
lowercase_ : Optional[int] = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}

# Torch modeling symbols are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream assigns this list INTO the import-structure dict
    # (e.g. _import_structure["modeling_mega"]); here it merely rebinds a
    # module-level name — obfuscation artifact, confirm against upstream.
    lowercase_ : Any = [
        '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegaForCausalLM''',
        '''MegaForMaskedLM''',
        '''MegaForMultipleChoice''',
        '''MegaForQuestionAnswering''',
        '''MegaForSequenceClassification''',
        '''MegaForTokenClassification''',
        '''MegaModel''',
        '''MegaPreTrainedModel''',
    ]

# Static imports for type checkers; at runtime the module is replaced by a
# lazy proxy instead.
if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): `_import_structure` is not defined anywhere in this file
    # (see note above) — NameError as written; upstream passes the
    # lazy-import dict defined at the top.  TODO confirm against upstream.
    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Must be named `_import_structure` (referenced by _LazyModule at the bottom);
# optional-backend entries are added as keys, not rebound to fresh variables.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so optional backends load only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
"""Integration tests for the `datasets` inspection helpers.

All test functions had been collapsed to a single name (``A__``), so only the
last one survived collection, and intermediate results were bound to throwaway
names (``_snake_case``) while later assertions referenced the real names
(guaranteed ``NameError``). Names are restored below.
"""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize('path', ['paws', 'csv'])
def test_inspect_dataset(path, tmp_path):
    """Inspecting a dataset copies its script into the target directory."""
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning')
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
@pytest.mark.parametrize('path', ['accuracy'])
def test_inspect_metric(path, tmp_path):
    """Inspecting a (deprecated) metric copies its script into the target directory."""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    'path, config_name, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # paws has several configs, so omitting config_name must fail.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    'path, expected',
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config',
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits',
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ],
)
def test_get_dataset_info_splits(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception',
    [
        ('paws', None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
652
1
"""Tests for the TensorFlow XGLM model.

The corrupted version defined all three classes as ``lowercase`` with an
undefined base ``a_``, replaced every ``self.<attr> = ...`` assignment with an
annotated local ``_snake_case`` (so later reads of ``self.batch_size`` etc.
would fail), and mangled ``tf.int32`` into ``tf.intaa``. Restored below.
"""
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        # get_config() reads `hidden_size`; keep it aliased to d_model.
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        # Token ids are clipped to a tiny range so the test model stays small.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        # (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
652
"""Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` model."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a PyTorch BERT model from ``bert_config_file``, load the TF weights, and save it.

    The function was previously defined under a placeholder name while the
    ``__main__`` block called ``convert_tf_checkpoint_to_pytorch`` (NameError).

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param bert_config_file: JSON config describing the pre-trained architecture
    :param pytorch_dump_path: where to write the converted ``state_dict``
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
652
1
"""Stale bot for huggingface/diffusers: closes or pings inactive GitHub issues."""
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are exempt from auto-closing.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'enhancement',
    'new pipeline/model',
    'new scheduler',
    'wip',
]


def main():
    """Walk all open issues and apply the stale policy.

    - Close issues whose last comment is the 7-day-old stale notice from the bot.
    - Un-stale issues where a human commented after the notice.
    - Post the stale notice on issues inactive for >23 days and >=30 days old.

    Requires a ``GITHUB_TOKEN`` environment variable with repo access.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state='closed')
        elif (
            # NOTE(review): get_labels() yields Label objects, so this membership
            # test compares a str against Labels — confirm against PyGithub's
            # Label.__eq__ semantics; kept as-is to preserve behavior.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state='open')
            issue.remove_from_labels('stale')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )
            issue.add_to_labels('stale')


if __name__ == "__main__":
    main()
652
"""Project Euler: find the n-th prime (default: 10001st).

The three functions had all been renamed to the same placeholder ``A__`` while
still calling each other (and being called from ``__main__``) by their real
names, so the module could not run. Real names restored.
"""
import itertools
import math


def is_prime(number):
    """Return True iff ``number`` is prime (trial division by 6k +/- 1)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth=10001):
    """Return the ``nth`` prime (1-indexed)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(F'''{solution() = }''')
652
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Must be named `_import_structure`: _LazyModule below looks this name up.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so torch is only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
"""Tests for the legacy seq2seq example datasets.

Restored from a corrupted version in which the helper functions and every test
method shared a placeholder name, and every local/attribute binding was
replaced by ``_snake_case`` while later code referenced the real names.
"""
import os
from pathlib import Path

import numpy as np
import pytest

from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset


BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'


def _dump_articles(path, articles):
    """Write one article per line to ``path``."""
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


def make_test_data_dir(tmp_dir):
    """Populate ``tmp_dir`` with train/val/test source+target files; return it."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'''{split}.source'''), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'''{split}.target'''), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE'  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path='train',
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq')
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'''too many tokens in {len(failures)} batches''')

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path='train',
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
                src_lang='EN',
                tgt_lang='FR',
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path='train',
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
652
1
import math


def A__(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power for a given apparent power and power factor.

    real_power = apparent_power * power_factor

    NOTE(review): this definition is immediately shadowed by the next ``def A__``
    below, so callers of ``A__`` get the reactive-power variant; upstream these
    were presumably two distinct names (real_power / reactive_power) — confirm.

    :param apparent_power: apparent power (volt-amperes)
    :param power_factor: cosine of the phase angle, must lie in [-1, 1]
    :raises ValueError: if power_factor is not a number in [-1, 1]
    """
    # Fix: the collapsed original declared both parameters with the same name
    # (a SyntaxError); names restored from the dangling references in the body.
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def A__(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power for a given apparent power and power factor.

    reactive_power = apparent_power * sqrt(1 - power_factor**2)

    :param apparent_power: apparent power (volt-amperes)
    :param power_factor: cosine of the phase angle, must lie in [-1, 1]
    :raises ValueError: if power_factor is not a number in [-1, 1]
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
from __future__ import annotations


def A__(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing order of value/weight ratio; the last item
    taken may be fractional.

    Fixes over the collapsed original: the three parameters all shared one name
    (a SyntaxError — restored from the dangling ``value``/``weight``/``capacity``
    references in the body), the sort key lambda did not use its argument, and
    the ``fractions[i]`` assignment targets were lost.

    :param value: value of each item
    :param weight: weight of each item (same length as ``value``)
    :param capacity: total carrying capacity
    :return: (maximum total value, fraction of each item taken)
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Visit items in order of decreasing value density.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
652
1
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return the falling product u * (u-1) * ... * (u-p+1).

    Used as the coefficient term in Newton's forward-interpolation formula.
    (Restored name: the collapsed original defined this as ``A__`` with two
    identically named parameters — a SyntaxError — while ``main`` still called
    ``ucal``.)
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Interactively read sample points and interpolate a value with Newton's
    forward-difference formula.

    NOTE(review): reconstructed from a name-mangled original whose assignment
    targets were lost; verify against the upstream interpolation script.
    """
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(F'''the value at {value} is {summ}''')


if __name__ == "__main__":
    main()
652
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import table for the CANINE package: maps submodule name -> names it
# exports.  Config and tokenizer are importable without optional dependencies.
# NOTE(review): the annotations ``Any``/``Optional`` below are not imported in
# this file, and ``_import_structure`` passed to ``_LazyModule`` is never
# defined here — looks like renaming damage from an automated rewrite; compare
# with the upstream ``transformers/models/canine/__init__.py``.
lowercase_ : Any = {
    '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
    '''tokenization_canine''': ['''CanineTokenizer'''],
}

# The modeling classes need torch; probe availability and only register them
# when the dependency is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds the dict above to a plain list instead of
    # adding a "modeling_canine" key — presumably renaming damage; verify.
    lowercase_ : Optional[Any] = [
        '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CanineForMultipleChoice''',
        '''CanineForQuestionAnswering''',
        '''CanineForSequenceClassification''',
        '''CanineForTokenClassification''',
        '''CanineLayer''',
        '''CanineModel''',
        '''CaninePreTrainedModel''',
        '''load_tf_weights_in_canine''',
    ]

# Under static type checking, perform the real imports so checkers and IDEs
# can resolve the names eagerly.
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports each
    # submodule on first attribute access.
    import sys

    lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
652
1
def A__(txt: str) -> list[str]:
    """Return one variant of *txt* per alphabetic character, with that single
    character upper-cased.

    Non-alphabetic positions produce no variant.  Fix over the collapsed
    original: the parameter was renamed to ``__lowerCAmelCase`` while the body
    still referenced ``txt``, so every call raised NameError; the original
    parameter name is restored.

    >>> A__("ab1")
    ['Ab1', 'aB1']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__('''doctest''').testmod()
652
import math


def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, using 6k±1 trial division.

    (Restored name: the collapsed original defined both functions as ``A__``,
    so the second shadowed the first and its call to ``is_prime`` dangled.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (Project Euler problem 7 style).

    :param nth: 1-based index of the prime to return; must be >= 1
    :raises TypeError: if *nth* cannot be converted to int
    :raises ValueError: if *nth* < 1
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    # Idiom: last element instead of primes[len(primes) - 1].
    return primes[-1]


if __name__ == "__main__":
    print(F'''{solution() = }''')
652
1
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True iff the partition for *positive_integer* is "perfect",
    i.e. ``log2(sqrt(4*n + 1)/2 + 1/2)`` is an integer.

    (Restored name: the collapsed original defined both functions as ``A__``,
    so the second shadowed the first and its call to
    ``check_partition_perfect`` dangled.)
    """
    exponent = math.loga(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2) if False else math.log2(
        math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2
    )
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition value for which the proportion of perfect
    partitions drops below *max_proportion* (Project Euler problem 207 style).

    NOTE(review): reconstructed from a name-mangled original whose assignment
    targets were lost; verify the returned quantity against the upstream
    solution.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # If candidate is an integer, then there is a partition for k.
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(F'''{solution() = }''')
652
import torch
from transformers import AutoModel


class lowercase(torch.nn.Module):
    """Few-shot NER (FSNER) span scorer.

    Given a query batch and a support batch, scores each query token as a
    candidate entity start/end by similarity against the support set's marked
    start/end tokens.

    Fixes over the collapsed original: all four methods shared one mangled
    name (so only the last survived and ``self.BERT`` dangled), the
    ``self.*`` assignment targets in ``__init__`` were lost, and several
    keyword arguments (``return_dict``, ``keepdim``) pointed at the wrong
    variable.  Restored from the dangling references in the bodies; verify
    against the upstream ``sayef/fsner`` implementation.
    """

    def __init__(self, lowerCamelCase_="sayef/fsner-bert-base-uncased"):
        """Load the BERT backbone and similarity/softmax heads.

        :param lowerCamelCase_: model id or path of the pretrained backbone
        """
        super(lowercase, self).__init__()
        self.bert = AutoModel.from_pretrained(lowerCamelCase_, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Run the backbone and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings over dim 2, keeping the dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        """Temperature-scaled softmax over cosine similarity of q against S."""
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Score start/end token probabilities for each query.

        :param W_query: tokenizer output for the query batch
        :param W_supports: tokenizer output for the support batch; also carries
            ``sizes``, ``start_token_id`` and ``end_token_id`` entries, which
            are consumed (deleted) here before the backbone call
        :return: (p_starts, p_ends) stacked per-query probability vectors
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # Remove bookkeeping keys so the remaining dict is a pure model input.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # NOTE(review): offset uses support_sizes[i - 1] rather than a
            # running cumulative sum — preserved from the original; confirm
            # upstream whether supports are meant to be cumulative.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
652
1
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Tuple = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) _snake_case : int = DatasetInfosDict.from_directory(__lowerCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Dict = str(__lowerCAmelCase ) dataset_info.write_to_directory(__lowerCAmelCase ) _snake_case : Union[str, Any] = DatasetInfo.from_directory(__lowerCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowerCAmelCase , 'dataset_info.json' ) ) def A__( ): _snake_case : Optional[int] = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , 
download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , ) _snake_case : int = dataset_info._to_yaml_dict() assert sorted(__lowerCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _snake_case : str = yaml.safe_dump(__lowerCAmelCase ) _snake_case : Tuple = yaml.safe_load(__lowerCAmelCase ) assert dataset_info_yaml_dict == reloaded def A__( ): _snake_case : int = DatasetInfo() _snake_case : List[str] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=13_37 ), } ), ] , ) def A__( __lowerCAmelCase , __lowerCAmelCase ): _snake_case : Dict = str(__lowerCAmelCase ) dataset_infos_dict.write_to_directory(__lowerCAmelCase ) _snake_case : List[str] = DatasetInfosDict.from_directory(__lowerCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _snake_case : Tuple = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _snake_case : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowerCAmelCase , 'README.md' ) )
652
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class lowercase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder stack over an embedded token sequence.

    Fixes over the collapsed original: every ``__init__`` parameter shared one
    mangled name (a SyntaxError), the base-class list repeated one mangled
    name, and the ``self.*`` assignment targets were lost.  Parameter and
    attribute names restored from the dangling references in ``forward`` and
    from the TaConfig keyword list; verify against the upstream diffusers
    spectrogram-diffusion notes encoder.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Learned-position table is frozen (requires_grad = False below).
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        ta_config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(ta_config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Embed, add positions, run the T5 blocks, and normalize.

        :param encoder_input_tokens: (batch, seq) integer token ids — assumed
            from the ``shape[1]``/embedding usage; confirm upstream
        :param encoder_inputs_mask: attention mask over the inputs
        :return: (encoded hidden states after dropout, the unchanged mask)
        """
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
652
1
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = tempfile.mkdtemp() _snake_case : Any = SamImageProcessor() _snake_case : int = SamProcessor(lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : Any , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor def __UpperCAmelCase ( self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : int = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case : Tuple = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) _snake_case : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def 
__UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = self.get_image_processor() _snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : Tuple = self.prepare_image_inputs() _snake_case : int = image_processor(lowerCamelCase_ , return_tensors='np' ) _snake_case : Dict = processor(images=lowerCamelCase_ , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' _snake_case : Any = self.get_image_processor() _snake_case : Optional[int] = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : Dict = [torch.ones((1, 3, 5, 5) )] _snake_case : Dict = [[17_64, 26_46]] _snake_case : Dict = [[6_83, 10_24]] _snake_case : Tuple = processor.post_process_masks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) _snake_case : Any = processor.post_process_masks( lowerCamelCase_ , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np _snake_case : Tuple = [np.ones((1, 3, 5, 5) )] _snake_case : Union[str, Any] = processor.post_process_masks(lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) _snake_case : Optional[int] = [[1, 0], [0, 1]] with self.assertRaises(lowerCamelCase_ ): _snake_case : List[Any] = processor.post_process_masks(lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) ) @require_vision @require_tf class lowercase ( unittest.TestCase ): """simple docstring""" def 
__UpperCAmelCase ( self : str ): '''simple docstring''' _snake_case : Any = tempfile.mkdtemp() _snake_case : Tuple = SamImageProcessor() _snake_case : int = SamProcessor(lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : str , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _snake_case : Optional[int] = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' _snake_case : int = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case : Any = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) _snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : Optional[Any] = self.get_image_processor() _snake_case : Dict = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : List[Any] = self.prepare_image_inputs() _snake_case : List[str] = image_processor(lowerCamelCase_ , return_tensors='np' ) _snake_case : Tuple = processor(images=lowerCamelCase_ , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key 
in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : List[Any] = self.get_image_processor() _snake_case : Tuple = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : List[str] = [tf.ones((1, 3, 5, 5) )] _snake_case : Tuple = [[17_64, 26_46]] _snake_case : Tuple = [[6_83, 10_24]] _snake_case : Tuple = processor.post_process_masks(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) _snake_case : Any = processor.post_process_masks( lowerCamelCase_ , tf.convert_to_tensor(lowerCamelCase_ ) , tf.convert_to_tensor(lowerCamelCase_ ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np _snake_case : int = [np.ones((1, 3, 5, 5) )] _snake_case : List[Any] = processor.post_process_masks( lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) _snake_case : int = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _snake_case : Tuple = processor.post_process_masks( lowerCamelCase_ , np.array(lowerCamelCase_ ) , np.array(lowerCamelCase_ ) , return_tensors='tf' ) @require_vision @require_torchvision class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[int] ): '''simple docstring''' _snake_case : int = tempfile.mkdtemp() _snake_case : List[str] = SamImageProcessor() _snake_case : Union[str, Any] = SamProcessor(lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : Dict , **lowerCamelCase_ : str ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ).image_processor def __UpperCAmelCase ( self : int ): '''simple docstring''' 
shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self : int ): '''simple docstring''' _snake_case : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] _snake_case : List[str] = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def __UpperCAmelCase ( self : Optional[Any] ): '''simple docstring''' _snake_case : Optional[int] = self.get_image_processor() _snake_case : List[str] = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : str = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _snake_case : List[str] = [tf.convert_to_tensor(lowerCamelCase_ )] _snake_case : List[Any] = [torch.tensor(lowerCamelCase_ )] _snake_case : str = [[17_64, 26_46]] _snake_case : Optional[Any] = [[6_83, 10_24]] _snake_case : List[str] = processor.post_process_masks( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors='tf' ) _snake_case : Tuple = processor.post_process_masks( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def __UpperCAmelCase ( self : Dict ): '''simple docstring''' _snake_case : str = self.get_image_processor() _snake_case : Any = SamProcessor(image_processor=lowerCamelCase_ ) _snake_case : Union[str, Any] = self.prepare_image_inputs() _snake_case : str = image_processor(lowerCamelCase_ , return_tensors='pt' )['pixel_values'].numpy() _snake_case : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors='pt' )['pixel_values'].numpy() _snake_case : str = image_processor(lowerCamelCase_ , return_tensors='tf' )['pixel_values'].numpy() _snake_case : str = processor(images=lowerCamelCase_ , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) 
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
652
def A__(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB", "ZZ") to its
    1-based column number.

    Fix over the collapsed original: the parameter was renamed while the body
    still referenced ``column_title``, so every call raised NameError; the
    original parameter name is restored.

    :param column_title: non-empty string of uppercase ASCII letters
        (enforced by the ``assert`` — callers must pre-validate)
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    # Walk right-to-left: each letter contributes (letter - 'A' + 1) * 26**power.
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
652
1
def A__(column_title: str) -> int:
    """Return the 1-based column number for an Excel column title.

    "A" -> 1, "Z" -> 26, "AA" -> 27, ... (base-26 with digits 1..26).

    Fix over the collapsed original (a duplicate of the block above): the
    parameter was renamed while the body still referenced ``column_title``,
    so every call raised NameError; the original name is restored.

    :param column_title: non-empty string of uppercase ASCII letters
        (enforced by the ``assert`` — callers must pre-validate)
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # ord(letter) - 64 maps 'A'..'Z' to 1..26.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
652
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase_ : List[str] = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowercase_ : Optional[int] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' lowercase_ : Any = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse('1.4.12' ): raise ImportWarning( 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n' 'You can install it with `pip install "sacrebleu>=1.4.12"`.' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[ 'https://github.com/jhclark/tercom', ] , ) def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ): '''simple docstring''' _snake_case : str = len(references[0] ) if any(len(lowerCamelCase_ ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) _snake_case : int = [[refs[i] for refs in references] for i in range(lowerCamelCase_ )] _snake_case : Optional[int] = TER( normalized=lowerCamelCase_ , no_punct=lowerCamelCase_ , asian_support=lowerCamelCase_ , case_sensitive=lowerCamelCase_ , ) _snake_case : Optional[Any] = sb_ter.corpus_score(lowerCamelCase_ , lowerCamelCase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
652
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class lowercase(PretrainedConfig):
    """Configuration class for a UPerNet semantic-segmentation model.

    Holds a nested backbone configuration plus the hyper-parameters of the
    UPerNet decode head and the optional FCN auxiliary head.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            # Default to a ResNet backbone exposing all four stages.
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a nested config that was serialized via `to_dict`.
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
652
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in `qs` match some window of the key tuple `ks`."""
    # Compile once and anchor at the end so each regex must match a whole key part.
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a `replace(key, val)` function from (pattern, replacement) rules."""

    def replace(key, val):
        # First matching rule wins; otherwise keep the current value.
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules mapping GPT-style parameter paths to PartitionSpecs."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def A__(in_dict):
    """Assign a PartitionSpec (or None) to every leaf of a flax parameter pytree.

    Raises an AssertionError if any leaf is not covered by the partition rules.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel so uncovered leaves are detectable.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
        'distilbert-base-uncased-distilled-squad': (
            'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
        ),
        'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
        'distilbert-base-cased-distilled-squad': (
            'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
        ),
        'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
        'distilbert-base-multilingual-cased': (
            'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
        'distilbert-base-uncased-distilled-squad': (
            'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
        ),
        'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
        'distilbert-base-cased-distilled-squad': (
            'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
        ),
        'distilbert-base-german-cased': (
            'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
        ),
        'distilbert-base-multilingual-cased': (
            'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'distilbert-base-uncased': 512,
    'distilbert-base-uncased-distilled-squad': 512,
    'distilbert-base-cased': 512,
    'distilbert-base-cased-distilled-squad': 512,
    'distilbert-base-german-cased': 512,
    'distilbert-base-multilingual-cased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'distilbert-base-uncased': {'do_lower_case': True},
    'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
    'distilbert-base-cased': {'do_lower_case': False},
    'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
    'distilbert-base-german-cased': {'do_lower_case': False},
    'distilbert-base-multilingual-cased': {'do_lower_case': False},
}


class lowercase(PreTrainedTokenizerFast):
    """A "fast" DistilBERT tokenizer (backed by HuggingFace `tokenizers`).

    WordPiece-based; identical vocabulary handling to BERT's fast tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it so the backend matches the python-side configuration.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
652
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` into `dirpath`, replacing any previously saved checkpoint."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) is defined as 0 for entropy purposes.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores.

    Importance is the accumulated absolute gradient of the loss w.r.t. the head mask.
    Returns (attn_entropy, head_importance, total_loss).
    """
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads while the LM score stays above threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and compare score/speed before and after."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head comes back as a bare int; normalize to a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
652
1
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update `check_min_version` calls in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (skips examples for patch releases)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to `main` documentation by stable links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump to the release version (interactively confirmed) before tagging."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Bump to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
652
def A__(num):
    """Return the largest value obtainable by deleting exactly one digit of `num`.

    The sign is ignored: the digits of abs(num) are used. `num` must have at
    least one digit left after removal to produce a valid integer.

    Raises:
        TypeError: if `num` is not an integer.
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    digits = str(abs(num))
    # One candidate per digit position: a copy with that digit removed.
    candidates = [list(digits) for _ in range(len(digits))]
    for index in range(len(digits)):
        candidates[index].pop(index)
    return max(int(''.join(candidate)) for candidate in candidates)


if __name__ == "__main__":
    __import__('''doctest''').testmod()
652
1