Dataset schema (five columns per row):

    column                    type     range / lengths
    ------------------------  -------  ----------------------
    code                      string   86 – 54.5k characters
    code_codestyle            int64    0 – 371
    style_context             string   87 – 49.2k characters
    style_context_codestyle   int64    0 – 349
    label                     int64    0 – 1
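For orientation, a minimal sketch of how rows with this schema could be loaded and inspected with the `datasets` library; the repository path below and the reading of `label` as a style-match flag are my assumptions, since the dump names neither:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical path; the dump does not name the dataset.
ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label

row = ds[0]
# Assumption: label == 1 when `code` and `style_context` share a code style.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
```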
--- Row 1 ---

code:

```python
'''simple docstring'''
from maths.prime_check import is_prime


def __magic_name__(A) -> int:
    if not isinstance(A, A):
        snake_case = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(A)
    if is_prime(A) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''


def __magic_name__(A) -> float:
    return 1_0 - x * x


def __magic_name__(A, A) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(A) * equation(A) >= 0:
        raise ValueError('Wrong space!')
    snake_case = a
    while (b - a) >= 0.01:
        # Find middle point
        snake_case = (a + b) / 2
        # Check if middle point is root
        if equation(A) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(A) * equation(A) < 0:
            snake_case = c
        else:
            snake_case = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
```
style_context_codestyle: 332
label: 1
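Because the dataset deliberately mangles identifiers (every function is `__magic_name__`, every assignment target `snake_case`), the samples above are hard to read; here is a de-obfuscated sketch of both Row 1 snippets, with names that are my own guesses at the originals:

```python
from maths.prime_check import is_prime  # helper from the sample's own repository


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` on [a, b] by bisection (Bolzano's theorem)."""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")  # no sign change, so no guaranteed root
    c = a
    while (b - a) >= 0.01:
        c = (a + b) / 2  # midpoint
        if equation(c) == 0.0:
            break
        if equation(c) * equation(a) < 0:
            b = c  # sign change on [a, c]: the root lies there
        else:
            a = c  # otherwise the root lies in [c, b]
    return c


print(bisection(-2, 5))  # ~3.16, since 10 - x*x has roots at +/- sqrt(10)
```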
--- Row 2 ---

code:

```python
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class lowerCamelCase(__lowerCAmelCase):
    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = None


class lowerCamelCase(__lowerCAmelCase, __lowerCAmelCase):
    snake_case_ = 2

    @register_to_config
    def __init__(self, lowercase_=0.02, lowercase_=100, lowercase_=1.007, lowercase_=80, lowercase_=0.05, lowercase_=50) -> Dict:
        # standard deviation of the initial noise distribution
        snake_case = sigma_max
        # setable values
        snake_case = None
        snake_case = None
        snake_case = None  # sigma(t_i)

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> torch.FloatTensor:
        return sample

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> Optional[Any]:
        snake_case = num_inference_steps
        snake_case = np.arange(0, self.num_inference_steps)[::-1].copy()
        snake_case = torch.from_numpy(lowercase_).to(lowercase_)
        snake_case = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        snake_case = torch.tensor(lowercase_, dtype=torch.floataa, device=lowercase_)

    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_=None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            snake_case = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            snake_case = 0
        # sample eps ~ N(0, S_noise^2 * I)
        snake_case = self.config.s_noise * randn_tensor(sample.shape, generator=lowercase_).to(sample.device)
        snake_case = sigma + gamma * sigma
        snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=True) -> Union[KarrasVeOutput, Tuple]:
        snake_case = sample_hat + sigma_hat * model_output
        snake_case = (sample_hat - pred_original_sample) / sigma_hat
        snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(prev_sample=lowercase_, derivative=lowercase_, pred_original_sample=lowercase_)

    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=True) -> Union[KarrasVeOutput, Tuple]:
        snake_case = sample_prev + sigma_prev * model_output
        snake_case = (sample_prev - pred_original_sample) / sigma_prev
        snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(prev_sample=lowercase_, derivative=lowercase_, pred_original_sample=lowercase_)

    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_) -> Any:
        raise NotImplementedError()
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''
import pytest

lowerCAmelCase_ = "__dummy_dataset1__"

lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def __magic_name__() -> List[Any]:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def __magic_name__() -> Union[str, Any]:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def __magic_name__(A, A, A) -> Optional[int]:
    snake_case = dataset_loading_script_name
    snake_case = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=A)
    snake_case = script_dir / F'''{script_name}.py'''
    with open(A, 'w') as f:
        f.write(A)
    return str(A)
```
style_context_codestyle: 332
label: 1
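The interesting arithmetic in Row 2's scheduler is the stochastic "churn" step of the Karras et al. sampler; a standalone NumPy sketch of the same computation, with simplified names of my choosing:

```python
import numpy as np


def churn_step(sample, sigma, s_churn, s_noise, num_steps, rng):
    # gamma temporarily raises the noise level: sigma_hat = sigma * (1 + gamma)
    gamma = min(s_churn / num_steps, 2**0.5 - 1)
    eps = s_noise * rng.standard_normal(sample.shape)
    sigma_hat = sigma + gamma * sigma
    # add exactly enough noise to move the sample from level sigma to sigma_hat
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
    return sample_hat, sigma_hat


# example values mirror the scheduler's defaults (s_churn=80, s_noise=1.007, 50 steps)
out, sigma_hat = churn_step(np.zeros((4, 4)), sigma=1.0, s_churn=80, s_noise=1.007,
                            num_steps=50, rng=np.random.default_rng(0))
```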
--- Row 3 ---

code:

```python
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer

lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

lowerCAmelCase_ = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

lowerCAmelCase_ = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json",
    },
}

lowerCAmelCase_ = {f"funnel-transformer/{name}": 5_1_2 for name in _model_names}
lowerCAmelCase_ = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class lowerCamelCase(__lowerCAmelCase):
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_INIT_CONFIGURATION
    snake_case_ = FunnelTokenizer
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = 2

    def __init__(self, lowercase_=None, lowercase_=None, lowercase_=True, lowercase_="<unk>", lowercase_="<sep>", lowercase_="<pad>", lowercase_="<cls>", lowercase_="<mask>", lowercase_="<s>", lowercase_="</s>", lowercase_=True, lowercase_=True, lowercase_=None, lowercase_="##", **lowercase_) -> Dict:
        super().__init__(
            lowercase_,
            tokenizer_file=lowercase_,
            do_lower_case=lowercase_,
            unk_token=lowercase_,
            sep_token=lowercase_,
            pad_token=lowercase_,
            cls_token=lowercase_,
            mask_token=lowercase_,
            bos_token=lowercase_,
            eos_token=lowercase_,
            clean_text=lowercase_,
            tokenize_chinese_chars=lowercase_,
            strip_accents=lowercase_,
            wordpieces_prefix=lowercase_,
            **lowercase_,
        )
        snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', lowercase_) != do_lower_case
            or normalizer_state.get('strip_accents', lowercase_) != strip_accents
            or normalizer_state.get('handle_chinese_chars', lowercase_) != tokenize_chinese_chars
        ):
            snake_case = getattr(lowercase_, normalizer_state.pop('type'))
            snake_case = do_lower_case
            snake_case = strip_accents
            snake_case = tokenize_chinese_chars
            snake_case = normalizer_class(**lowercase_)
        snake_case = do_lower_case

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> Optional[int]:
        snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> List[int]:
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> Tuple[str]:
        snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_)
        return tuple(lowercase_)
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()


def __magic_name__(A, A, A, A, A, A, A) -> Any:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 1_0):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(A)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            snake_case = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            snake_case = min(A, A)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(A)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            snake_case = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            snake_case = max(A, A)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(A)


def __magic_name__(A) -> str:
    snake_case = []
    snake_case = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside of the loop
    snake_case = Pipe()
    snake_case = Pipe()
    process_array_.append(Process(target=A, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0])))
    snake_case = temp_rs
    snake_case = temp_rr
    for i in range(1, len(A) - 1):
        snake_case = Pipe()
        snake_case = Pipe()
        process_array_.append(Process(target=A, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i])))
        snake_case = temp_rs
        snake_case = temp_rr
    process_array_.append(
        Process(target=A, args=(len(A) - 1, arr[len(A) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A) - 1]))
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(A)):
        snake_case = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def __magic_name__() -> Tuple:
    snake_case = list(range(1_0, 0, -1))
    print('Initial List')
    print(*A)
    snake_case = odd_even_transposition(A)
    print('Sorted List\n')
    print(*A)


if __name__ == "__main__":
    main()
```
style_context_codestyle: 332
label: 1
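Row 3's style_context is a multiprocessing odd-even transposition sort, where each list position is its own process and neighbors exchange values through pipes; the underlying algorithm is easier to follow in this sequential sketch (my addition, not part of the dataset):

```python
def odd_even_transposition(arr: list) -> list:
    n = len(arr)
    for phase in range(n):     # n phases guarantee the list is sorted
        start = phase % 2      # alternate between even- and odd-indexed pairs
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


print(odd_even_transposition(list(range(10, 0, -1))))  # [1, 2, ..., 10]
```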
--- Row 4 ---

code:

```python
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCamelCase(__lowerCAmelCase):
    def __init__(self, lowercase_, lowercase_, lowercase_, lowercase_=None) -> List[str]:
        super().__init__()
        self.register_modules(transformer=lowercase_, vae=lowercase_, scheduler=lowercase_)
        # create a imagenet -> id dictionary for easier use
        snake_case = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(','):
                    snake_case = int(lowercase_)
            snake_case = dict(sorted(self.labels.items()))

    def _lowerCamelCase(self, lowercase_) -> List[int]:
        if not isinstance(lowercase_, lowercase_):
            snake_case = list(lowercase_)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.'''
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self, lowercase_, lowercase_=4.0, lowercase_=None, lowercase_=50, lowercase_="pil", lowercase_=True) -> Union[ImagePipelineOutput, Tuple]:
        snake_case = len(lowercase_)
        snake_case = self.transformer.config.sample_size
        snake_case = self.transformer.config.in_channels
        snake_case = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=lowercase_,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        snake_case = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        snake_case = torch.tensor(lowercase_, device=self.device).reshape(-1)
        snake_case = torch.tensor([1000] * batch_size, device=self.device)
        snake_case = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(lowercase_)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                snake_case = latent_model_input[: len(lowercase_) // 2]
                snake_case = torch.cat([half, half], dim=0)
            snake_case = self.scheduler.scale_model_input(lowercase_, lowercase_)
            snake_case = t
            if not torch.is_tensor(lowercase_):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                snake_case = latent_model_input.device.type == 'mps'
                if isinstance(lowercase_, lowercase_):
                    snake_case = torch.floataa if is_mps else torch.floataa
                else:
                    snake_case = torch.intaa if is_mps else torch.intaa
                snake_case = torch.tensor([timesteps], dtype=lowercase_, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                snake_case = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            snake_case = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            snake_case = self.transformer(lowercase_, timestep=lowercase_, class_labels=lowercase_).sample
            # perform guidance
            if guidance_scale > 1:
                snake_case, snake_case = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                snake_case, snake_case = torch.split(lowercase_, len(lowercase_) // 2, dim=0)
                snake_case = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                snake_case = torch.cat([half_eps, half_eps], dim=0)
                snake_case = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                snake_case, snake_case = torch.split(lowercase_, lowercase_, dim=1)
            else:
                snake_case = noise_pred
            # compute previous image: x_t -> x_t-1
            snake_case = self.scheduler.step(lowercase_, lowercase_, lowercase_).prev_sample
        if guidance_scale > 1:
            snake_case, snake_case = latent_model_input.chunk(2, dim=0)
        else:
            snake_case = latent_model_input
        snake_case = 1 / self.vae.config.scaling_factor * latents
        snake_case = self.vae.decode(lowercase_).sample
        snake_case = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        snake_case = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            snake_case = self.numpy_to_pil(lowercase_)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=lowercase_)
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''
from __future__ import annotations


def __magic_name__(A) -> None:
    create_state_space_tree(A, [], 0, [0 for i in range(len(A))])


def __magic_name__(A, A, A, A) -> None:
    if index == len(A):
        print(A)
        return
    for i in range(len(A)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            snake_case = True
            create_state_space_tree(A, A, index + 1, A)
            current_sequence.pop()
            snake_case = False


lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)

lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
```
style_context_codestyle: 332
label: 1
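Row 4's style_context builds each permutation by appending unused elements and backtracking; since it always picks unused elements left to right, it should visit permutations in the same order as the standard library, as this comparison sketch suggests:

```python
from itertools import permutations

# the backtracking sample prints these same 24 sequences, in this order
for p in permutations([3, 1, 2, 4]):
    print(list(p))
```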
--- Row 5 ---

code: (verbatim duplicate of the bisection sample shown as Row 1's style_context; omitted here)
code_codestyle: 332

style_context:

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class lowerCamelCase(__lowerCAmelCase):
    snake_case_ = '''roberta'''

    def __init__(self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_) -> Tuple:
        super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_)
        snake_case = vocab_size
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = hidden_act
        snake_case = intermediate_size
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = max_position_embeddings
        snake_case = type_vocab_size
        snake_case = initializer_range
        snake_case = layer_norm_eps
        snake_case = position_embedding_type
        snake_case = use_cache
        snake_case = classifier_dropout


class lowerCamelCase(__lowerCAmelCase):
    @property
    def _lowerCamelCase(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            snake_case = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
```
style_context_codestyle: 332
label: 1
--- Row 6 ---

code:

```python
'''simple docstring'''
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def __magic_name__(A) -> Tuple:
    snake_case = []
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight'''))
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias'''))
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight'''))
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias'''))
    return embed


def __magic_name__(A, A) -> Optional[int]:
    snake_case = []
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight'''))
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias'''))
    return attention_weights


def __magic_name__(A) -> List[Any]:
    snake_case = []
    token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token'))
    return token


def __magic_name__() -> Dict:
    snake_case = []
    head.append(('layernorm.weight', 'norm.weight'))
    head.append(('layernorm.bias', 'norm.bias'))
    head.append(('classifier.weight', 'head.weight'))
    head.append(('classifier.bias', 'head.bias'))
    return head


def __magic_name__(A, A, A, A) -> int:
    snake_case = 'imagenet-1k-id2label.json'
    snake_case = 1_0_0_0
    snake_case = 'huggingface/label-files'
    snake_case = num_labels
    snake_case = json.load(open(cached_download(hf_hub_url(A, A, repo_type='dataset')), 'r'))
    snake_case = {int(A): v for k, v in idalabel.items()}
    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}
    snake_case = snake_case = CvtConfig(num_labels=A, idalabel=A, labelaid=A)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        snake_case = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        snake_case = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        snake_case = [2, 2, 2_0]
    snake_case = [3, 1_2, 1_6]
    snake_case = [1_9_2, 7_6_8, 1_0_2_4]
    snake_case = CvtForImageClassification(A)
    snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    snake_case = image_size
    snake_case = torch.load(A, map_location=torch.device('cpu'))
    snake_case = OrderedDict()
    snake_case = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            snake_case = list_of_state_dict + cls_token(A)
        snake_case = list_of_state_dict + embeddings(A)
        for cnt in range(config.depth[idx]):
            snake_case = list_of_state_dict + attention(A, A)
    snake_case = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(A)
    for i in range(len(A)):
        snake_case = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(A)
    model.save_pretrained(A)
    image_processor.save_pretrained(A)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=3_8_4,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer

lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

lowerCAmelCase_ = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

lowerCAmelCase_ = {
    "allenai/led-base-16384": 1_6_3_8_4,
}


class lowerCamelCase(__lowerCAmelCase):
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = LEDTokenizer
    snake_case_ = ['''input_ids''', '''attention_mask''']

    def __init__(self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_) -> int:
        super().__init__(
            lowercase_,
            lowercase_,
            tokenizer_file=lowercase_,
            errors=lowercase_,
            bos_token=lowercase_,
            eos_token=lowercase_,
            sep_token=lowercase_,
            cls_token=lowercase_,
            unk_token=lowercase_,
            pad_token=lowercase_,
            mask_token=lowercase_,
            add_prefix_space=lowercase_,
            trim_offsets=lowercase_,
            **lowercase_,
        )
        snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', lowercase_) != add_prefix_space:
            snake_case = getattr(lowercase_, pre_tok_state.pop('type'))
            snake_case = add_prefix_space
            snake_case = pre_tok_class(**lowercase_)
        snake_case = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case = 'post_processor'
        snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_)
        if tokenizer_component_instance:
            snake_case = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case = tuple(state['sep'])
            if "cls" in state:
                snake_case = tuple(state['cls'])
            snake_case = False
            if state.get('add_prefix_space', lowercase_) != add_prefix_space:
                snake_case = add_prefix_space
                snake_case = True
            if state.get('trim_offsets', lowercase_) != trim_offsets:
                snake_case = trim_offsets
                snake_case = True
            if changes_to_apply:
                snake_case = getattr(lowercase_, state.pop('type'))
                snake_case = component_class(**lowercase_)
                setattr(self.backend_tokenizer, lowercase_, lowercase_)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def _lowerCamelCase(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def _lowerCamelCase(self, lowercase_) -> Any:
        snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_) if isinstance(lowercase_, lowercase_) else value
        snake_case = value

    def _lowerCamelCase(self, *lowercase_, **lowercase_) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*lowercase_, **lowercase_)

    def _lowerCamelCase(self, *lowercase_, **lowercase_) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*lowercase_, **lowercase_)

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> Tuple[str]:
        snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_)
        return tuple(lowercase_)

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> Dict:
        snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _lowerCamelCase(self, lowercase_, lowercase_=None) -> List[int]:
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def _lowerCamelCase(self, lowercase_, lowercase_=None, lowercase_=PaddingStrategy.DO_NOT_PAD, lowercase_=None, lowercase_=None) -> dict:
        snake_case = super()._pad(
            encoded_inputs=lowercase_,
            max_length=lowercase_,
            padding_strategy=lowercase_,
            pad_to_multiple_of=lowercase_,
            return_attention_mask=lowercase_,
        )
        # Load from model defaults
        if return_attention_mask is None:
            snake_case = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            snake_case = len(encoded_inputs['global_attention_mask']) != len(lowercase_)
            if needs_to_be_padded:
                snake_case = len(lowercase_) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    snake_case = encoded_inputs['global_attention_mask'] + [-1] * difference
                elif self.padding_side == "left":
                    snake_case = [-1] * difference + encoded_inputs['global_attention_mask']
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
```
style_context_codestyle: 332
label: 1
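The conversion script in Row 6 boils down to one pattern: each helper emits (new_name, old_name) pairs, and the final loop copies every original tensor under its new name. A minimal sketch of that pattern (my own simplification, not part of the dataset):

```python
from collections import OrderedDict


def rename_state_dict(original_weights: dict, name_pairs: list) -> OrderedDict:
    # name_pairs holds (new_name, old_name) tuples, as built by the embeddings,
    # attention, cls_token and head helpers in the script above
    renamed = OrderedDict()
    for new_name, old_name in name_pairs:
        renamed[new_name] = original_weights[old_name]
    return renamed
```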
--- Row 7 ---

code:

```python
'''simple docstring'''
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class lowerCamelCase(unittest.TestCase):
    def _lowerCamelCase(self, lowercase_, lowercase_) -> Optional[Any]:
        return F'''gaussian_noise_s={seed}_shape={"_".join([str(lowercase_) for s in shape])}.npy'''

    def _lowerCamelCase(self) -> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def _lowerCamelCase(self, lowercase_=0, lowercase_=(4, 4, 64, 64), lowercase_=False) -> Tuple:
        snake_case = jnp.bfloataa if fpaa else jnp.floataa
        snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase_, lowercase_)), dtype=lowercase_)
        return image

    def _lowerCamelCase(self, lowercase_=False, lowercase_="CompVis/stable-diffusion-v1-4") -> int:
        snake_case = jnp.bfloataa if fpaa else jnp.floataa
        snake_case = 'bf16' if fpaa else None
        snake_case, snake_case = FlaxUNetaDConditionModel.from_pretrained(lowercase_, subfolder='unet', dtype=lowercase_, revision=lowercase_)
        return model, params

    def _lowerCamelCase(self, lowercase_=0, lowercase_=(4, 77, 768), lowercase_=False) -> Optional[Any]:
        snake_case = jnp.bfloataa if fpaa else jnp.floataa
        snake_case = jnp.array(load_hf_numpy(self.get_file_format(lowercase_, lowercase_)), dtype=lowercase_)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
            [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
            [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
            [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
            # fmt: on
        ]
    )
    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_) -> Optional[Any]:
        snake_case, snake_case = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fpaa=lowercase_)
        snake_case = self.get_latents(lowercase_, fpaa=lowercase_)
        snake_case = self.get_encoder_hidden_states(lowercase_, fpaa=lowercase_)
        snake_case = model.apply(
            {'params': params},
            lowercase_,
            jnp.array(lowercase_, dtype=jnp.intaa),
            encoder_hidden_states=lowercase_,
        ).sample
        assert sample.shape == latents.shape
        snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.floataa)
        snake_case = jnp.array(lowercase_, dtype=jnp.floataa)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(lowercase_, lowercase_, atol=1E-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
            [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
            [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
            [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
            # fmt: on
        ]
    )
    def _lowerCamelCase(self, lowercase_, lowercase_, lowercase_) -> Tuple:
        snake_case, snake_case = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fpaa=lowercase_)
        snake_case = self.get_latents(lowercase_, shape=(4, 4, 96, 96), fpaa=lowercase_)
        snake_case = self.get_encoder_hidden_states(lowercase_, shape=(4, 77, 1024), fpaa=lowercase_)
        snake_case = model.apply(
            {'params': params},
            lowercase_,
            jnp.array(lowercase_, dtype=jnp.intaa),
            encoder_hidden_states=lowercase_,
        ).sample
        assert sample.shape == latents.shape
        snake_case = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.floataa)
        snake_case = jnp.array(lowercase_, dtype=jnp.floataa)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(lowercase_, lowercase_, atol=1E-2)
```
code_codestyle: 332
style_context: (verbatim duplicate of the CvT checkpoint-conversion script shown as Row 6's code; omitted here)
style_context_codestyle: 332
label: 1
--- Row 8 ---

code:

```python
'''simple docstring'''


class lowerCamelCase(__lowerCAmelCase):
    pass


class lowerCamelCase(__lowerCAmelCase):
    pass


class lowerCamelCase:
    def __init__(self) -> Dict:
        snake_case = [
            [],
            [],
            [],
        ]

    def _lowerCamelCase(self, lowercase_, lowercase_) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError('Maximum queue size is 100')
            self.queues[priority].append(lowercase_)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def _lowerCamelCase(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self) -> str:
        return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues))


class lowerCamelCase:
    def __init__(self) -> str:
        snake_case = []

    def _lowerCamelCase(self, lowercase_) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(lowercase_)

    def _lowerCamelCase(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            snake_case = min(self.queue)
            self.queue.remove(lowercase_)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def __magic_name__() -> Dict:
    snake_case = FixedPriorityQueue()
    fpq.enqueue(0, 1_0)
    fpq.enqueue(1, 7_0)
    fpq.enqueue(0, 1_0_0)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 6_4)
    fpq.enqueue(0, 1_2_8)
    print(A)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(A)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def __magic_name__() -> Union[str, Any]:
    snake_case = ElementPriorityQueue()
    epq.enqueue(1_0)
    epq.enqueue(7_0)
    epq.enqueue(1_0_0)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(6_4)
    epq.enqueue(1_2_8)
    print(A)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(A)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
```
code_codestyle: 332

style_context:

```python
'''simple docstring'''
from pathlib import Path

import fire


def __magic_name__(A, A, A) -> Union[str, Any]:
    snake_case = Path(A)
    snake_case = Path(A)
    dest_dir.mkdir(exist_ok=A)
    for path in src_dir.iterdir():
        snake_case = [x.rstrip() for x in list(path.open().readlines())][:n]
        snake_case = dest_dir.joinpath(path.name)
        print(A)
        dest_path.open('w').write('\n'.join(A))


if __name__ == "__main__":
    fire.Fire(minify)
```
style_context_codestyle: 332
label: 1
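Both queues in Row 8's code dequeue the smallest item first and cap at 100 elements; for contrast, here is the element-priority variant rebuilt on the standard library's heapq, which replaces the O(n) min-and-remove scan with O(log n) heap operations (an alternative sketch, not from the dataset):

```python
import heapq


class HeapElementPriorityQueue:
    """Smallest element dequeues first; capped at 100 items like the sample."""

    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self._heap) == 100:
            raise OverflowError("Maximum queue size is 100")
        heapq.heappush(self._heap, data)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)
```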
--- Row 9 ---

code:

```python
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def __magic_name__(A=None) -> Any:
    if subparsers is not None:
        snake_case = subparsers.add_parser('env')
    else:
        snake_case = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument('--config_file', default=A, help='The config file to use for the default values in the launching script.')
    if subparsers is not None:
        parser.set_defaults(func=A)
    return parser


def __magic_name__(A) -> str:
    snake_case = torch.__version__
    snake_case = torch.cuda.is_available()
    snake_case = is_xpu_available()
    snake_case = is_npu_available()
    snake_case = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(A):
        snake_case = load_config_from_file(args.config_file).to_dict()
    snake_case = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
        'PyTorch XPU available': str(A),
        'PyTorch NPU available': str(A),
        'System RAM': F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        snake_case = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()]))
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    snake_case = (
        '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(A, A)
        else F'''\t{accelerate_config}'''
    )
    print(A)
    snake_case = accelerate_config
    return info


def __magic_name__() -> int:
    snake_case = env_command_parser()
    snake_case = parser.parse_args()
    env_command(A)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
```
332
'''simple docstring'''

import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


lowerCAmelCase_ = pytest.mark.integration


@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
    inspect_dataset(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
    inspect_metric(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )


@pytest.mark.parametrize(
    'path, config_name, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_config_info(A , config_name=A )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] ,
)
def __magic_name__ ( A , A , A ) -> Any:
    with pytest.raises(A ):
        get_dataset_config_info(A , config_name=A )


@pytest.mark.parametrize(
    'path, expected' ,
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] ,
)
def __magic_name__ ( A , A ) -> Dict:
    snake_case = get_dataset_config_names(A )
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' ,
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_infos(A )
    assert list(infos.keys() ) == expected_configs
    snake_case = expected_configs[0]
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> Any:
    snake_case = get_dataset_infos(A )
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] ,
)
def __magic_name__ ( A , A , A ) -> int:
    with pytest.raises(A ):
        get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring'''

from typing import List

import numpy as np


def __magic_name__ ( A ) -> int:
    snake_case = {key: len(A ) for key, value in gen_kwargs.items() if isinstance(A , A )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    snake_case = max(lists_lengths.values() , default=0 )
    return max(1 , A )


def __magic_name__ ( A , A ) -> List[range]:
    snake_case = []
    for group_idx in range(A ):
        snake_case = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        snake_case = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        snake_case = range(A , start + num_shards_to_add )
        shards_indices_per_group.append(A )
    return shards_indices_per_group


def __magic_name__ ( A , A ) -> List[dict]:
    snake_case = _number_of_shards_in_gen_kwargs(A )
    if num_shards == 1:
        return [dict(A )]
    else:
        snake_case = _distribute_shards(num_shards=A , max_num_jobs=A )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(A , A )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(A ) )
        ]


def __magic_name__ ( A ) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , A )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def __magic_name__ ( A , A ) -> dict:
    snake_case = {len(A ) for value in gen_kwargs.values() if isinstance(A , A )}
    snake_case = {}
    for size in list_sizes:
        snake_case = list(range(A ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    snake_case = dict(A )
    for key, value in shuffled_kwargs.items():
        if isinstance(A , A ):
            snake_case = [value[i] for i in indices_per_size[len(A )]]
    return shuffled_kwargs
332
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase_ = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring'''

import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


lowerCAmelCase_ = logging.get_logger(__name__)


def __magic_name__ ( A , A , A , A ) -> Tuple[int, int]:
    def constraint_to_multiple_of(A , A , A=0 , A=None ):
        snake_case = round(val / multiple ) * multiple

        if max_val is not None and x > max_val:
            snake_case = math.floor(val / multiple ) * multiple

        if x < min_val:
            snake_case = math.ceil(val / multiple ) * multiple

        return x

    snake_case = (output_size, output_size) if isinstance(A , A ) else output_size

    snake_case , snake_case = get_image_size(A )
    snake_case , snake_case = output_size

    # determine new height and width
    snake_case = output_height / input_height
    snake_case = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            snake_case = scale_width
        else:
            # fit height
            snake_case = scale_height

    snake_case = constraint_to_multiple_of(scale_height * input_height , multiple=A )
    snake_case = constraint_to_multiple_of(scale_width * input_width , multiple=A )

    return (new_height, new_width)


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = ['''pixel_values''']

    def __init__( self, lowercase_ = True, lowercase_ = None, lowercase_ = PILImageResampling.BILINEAR, lowercase_ = False, lowercase_ = 1, lowercase_ = True, lowercase_ = 1 / 255, lowercase_ = True, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> None:
        super().__init__(**lowercase_ )
        snake_case = size if size is not None else {'height': 384, 'width': 384}
        snake_case = get_size_dict(lowercase_ )
        snake_case = do_resize
        snake_case = size
        snake_case = keep_aspect_ratio
        snake_case = ensure_multiple_of
        snake_case = resample
        snake_case = do_rescale
        snake_case = rescale_factor
        snake_case = do_normalize
        snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = False, lowercase_ = 1, lowercase_ = PILImageResampling.BICUBIC, lowercase_ = None, **lowercase_, ) -> np.ndarray:
        snake_case = get_size_dict(lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        snake_case = get_resize_output_image_size(
            lowercase_, output_size=(size['height'], size['width']), keep_aspect_ratio=lowercase_, multiple=lowercase_, )
        return resize(lowercase_, size=lowercase_, resample=lowercase_, data_format=lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> List[str]:
        return rescale(lowercase_, scale=lowercase_, data_format=lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray:
        return normalize(lowercase_, mean=lowercase_, std=lowercase_, data_format=lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = ChannelDimension.FIRST, **lowercase_, ) -> PIL.Image.Image:
        snake_case = do_resize if do_resize is not None else self.do_resize
        snake_case = size if size is not None else self.size
        snake_case = get_size_dict(lowercase_ )
        snake_case = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        snake_case = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        snake_case = resample if resample is not None else self.resample
        snake_case = do_rescale if do_rescale is not None else self.do_rescale
        snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case = do_normalize if do_normalize is not None else self.do_normalize
        snake_case = image_mean if image_mean is not None else self.image_mean
        snake_case = image_std if image_std is not None else self.image_std

        snake_case = make_list_of_images(lowercase_ )

        if not valid_images(lowercase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        snake_case = [to_numpy_array(lowercase_ ) for image in images]

        if do_resize:
            snake_case = [self.resize(image=lowercase_, size=lowercase_, resample=lowercase_ ) for image in images]

        if do_rescale:
            snake_case = [self.rescale(image=lowercase_, scale=lowercase_ ) for image in images]

        if do_normalize:
            snake_case = [self.normalize(image=lowercase_, mean=lowercase_, std=lowercase_ ) for image in images]

        snake_case = [to_channel_dimension_format(lowercase_, lowercase_ ) for image in images]

        snake_case = {'pixel_values': images}
        return BatchFeature(data=lowercase_, tensor_type=lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple:
        snake_case = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )

            if is_torch_tensor(lowercase_ ):
                snake_case = target_sizes.numpy()

            snake_case = []

            for idx in range(len(lowercase_ ) ):
                snake_case = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='bilinear', align_corners=lowercase_ )
                snake_case = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase_ )
        else:
            snake_case = logits.argmax(dim=1 )
            snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
332
'''simple docstring'''

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


lowerCAmelCase_ = False


class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 12

    @property
    def _lowerCamelCase ( self ) -> Dict:
        return 12

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 32

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        torch.manual_seed(0 )
        snake_case = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def _lowerCamelCase ( self ) -> Tuple:
        torch.manual_seed(0 )
        snake_case = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(lowercase_ )

    @property
    def _lowerCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        snake_case = 12
        snake_case = 12
        snake_case = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        snake_case = TransformeraDModel(**lowercase_ )
        return model

    def _lowerCamelCase ( self ) -> Tuple:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )

        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )

        snake_case = 'teddy bear playing in the pool'

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]

        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowerCamelCase ( self ) -> Optional[Any]:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(
            learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )

        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )

        snake_case = 'teddy bear playing in the pool'

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]

        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self ) -> str:
        snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )

        snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        snake_case = pipeline.to(lowercase_ )
        pipeline.set_progress_bar_config(disable=lowercase_ )

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
        snake_case = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring'''

from functools import reduce


lowerCAmelCase_ = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def __magic_name__ ( A = N ) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda A , A : str(int(A ) * int(A ) ) , n[i : i + 1_3] ) )
        for i in range(len(A ) - 1_2 )
    )


if __name__ == "__main__":
    print(f"{solution() = }")
332
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class lowerCamelCase ( metaclass=__lowerCAmelCase ):
    snake_case_ = ['''note_seq''']

    def __init__( self, *lowercase_, **lowercase_ ) -> str:
        requires_backends(self, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
        requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring'''


def __magic_name__ ( A , A ) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    snake_case = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) )
    return round(A , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
332
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.', lowercase_, )
        super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring'''

from datetime import datetime

import requests


def __magic_name__ ( A ) -> bytes:
    snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(A ).content


if __name__ == "__main__":
    lowerCAmelCase_ = input("Enter Video/IGTV url: ").strip()
    lowerCAmelCase_ = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
332
'''simple docstring'''

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values


lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)


lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)


def __magic_name__ ( ) -> Any:
    plt.scatter(A , A , color='red' )
    plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
332
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


lowerCAmelCase_ = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring'''

import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = ''''''
    snake_case_ = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    snake_case_ = None  # compression type in fsspec. ex: "gzip"
    snake_case_ = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
        super().__init__(self, **lowercase_ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case = fsspec.open(
            lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {} ),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        snake_case = os.path.basename(self.file.path.split('::' )[0] )
        snake_case = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        snake_case = None

    @classmethod
    def _lowerCamelCase ( cls, lowercase_ ) -> Any:
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(lowercase_ ).lstrip('/' )

    def _lowerCamelCase ( self ) -> Optional[Any]:
        if self.dir_cache is None:
            snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            snake_case = {f['name']: f}

    def _lowerCamelCase ( self, lowercase_ ) -> str:
        return self.file.open().read()

    def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
        snake_case = self._strip_protocol(lowercase_ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''bz2'''
    snake_case_ = '''bz2'''
    snake_case_ = '''.bz2'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''gzip'''
    snake_case_ = '''gzip'''
    snake_case_ = '''.gz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''lz4'''
    snake_case_ = '''lz4'''
    snake_case_ = '''.lz4'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''xz'''
    snake_case_ = '''xz'''
    snake_case_ = '''.xz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''zstd'''
    snake_case_ = '''zstd'''
    snake_case_ = '''.zst'''

    def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
        super().__init__(
            fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case = self.file.__enter__

        class lowerCamelCase :
            def __init__( self, lowercase_ ) -> List[Any]:
                snake_case = file_

            def __enter__( self ) -> Dict:
                self._file.__enter__()
                return self

            def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
                self._file.__exit__(*lowercase_, **lowercase_ )

            def __iter__( self ) -> List[str]:
                return iter(self._file )

            def _lowerCamelCase ( self ) -> List[str]:
                return next(self._file )

            def __getattr__( self, lowercase_ ) -> List[Any]:
                return getattr(self._file, lowercase_ )

        def fixed_enter(*lowercase_, **lowercase_ ):
            return WrappedFile(_enter(*lowercase_, **lowercase_ ) )

        snake_case = fixed_enter
332
1
'''simple docstring'''

from __future__ import annotations


def __magic_name__ ( A ) -> int:
    if not nums:
        return 0
    snake_case = nums[0]
    snake_case = 0
    for num in nums[1:]:
        snake_case , snake_case = (
            max_excluding + num,
            max(A , A ),
        )
    return max(A , A )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
332
'''simple docstring'''

from __future__ import annotations


def __magic_name__ ( A , A , A ) -> int | float:
    if len(A ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(A )
        or left < -len(A )
        or right >= len(A )
        or right < -len(A )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    snake_case = (left + right) >> 1  # the middle
    snake_case = find_max(A , A , A )  # find max in range[left, mid]
    snake_case = find_max(A , mid + 1 , A )  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
332
1
'''simple docstring'''

import argparse
import json
import subprocess


def __magic_name__ ( A , A ) -> Optional[Any]:
    snake_case = []
    snake_case = (
        F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    snake_case = subprocess.run(A , shell=A , stdout=subprocess.PIPE )
    snake_case = output.stdout.decode('utf-8' )
    snake_case = json.loads(A )

    snake_case = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(A )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(A ) )

    if len(A ) > 0:
        snake_case = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F'''The following runners are offline:\n{failed}''' )


if __name__ == "__main__":

    def __magic_name__ ( A ) -> int:
        return values.split(',' )

    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    lowerCAmelCase_ = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
332
'''simple docstring'''

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = 42


class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
    @register_to_config
    def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
        super().__init__()

        # pass init params to Encoder
        snake_case = Encoder(
            in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )

        snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels

        snake_case = nn.Convad(lowercase_, lowercase_, 1 )
        snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
        snake_case = nn.Convad(lowercase_, lowercase_, 1 )

        # pass init params to Decoder
        snake_case = Decoder(
            in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )

    @apply_forward_hook
    def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
        snake_case = self.encoder(lowercase_ )
        snake_case = self.quant_conv(lowercase_ )

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=lowercase_ )

    @apply_forward_hook
    def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            snake_case , snake_case , snake_case = self.quantize(lowercase_ )
        else:
            snake_case = h
        snake_case = self.post_quant_conv(lowercase_ )
        snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        snake_case = sample
        snake_case = self.encode(lowercase_ ).latents
        snake_case = self.decode(lowercase_ ).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring'''

import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class lowerCamelCase ( unittest.TestCase ):
    def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=4, ) -> Dict:
        snake_case = parent
        snake_case = batch_size
        snake_case = seq_length
        snake_case = is_training
        snake_case = use_attention_mask
        snake_case = use_token_type_ids
        snake_case = use_labels
        snake_case = vocab_size
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = max_position_embeddings
        snake_case = type_vocab_size
        snake_case = type_sequence_label_size
        snake_case = initializer_range
        snake_case = num_choices

    def _lowerCamelCase ( self ) -> int:
        snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )

        snake_case = None
        if self.use_attention_mask:
            snake_case = random_attention_mask([self.batch_size, self.seq_length] )

        snake_case = None
        if self.use_token_type_ids:
            snake_case = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )

        snake_case = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowercase_, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def _lowerCamelCase ( self ) -> str:
        snake_case = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case , snake_case = config_and_inputs
        snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
    snake_case_ = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase ( self ) -> Tuple:
        snake_case = FlaxAlbertModelTester(self )

    @slow
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        for model_class_name in self.all_model_classes:
            snake_case = model_class_name.from_pretrained('albert-base-v2' )
            snake_case = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowercase_ )


@require_flax
class lowerCamelCase ( unittest.TestCase ):
    @slow
    def _lowerCamelCase ( self ) -> List[str]:
        snake_case = FlaxAlbertModel.from_pretrained('albert-base-v2' )
        snake_case = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        snake_case = model(lowercase_, attention_mask=lowercase_ )[0]
        snake_case = (1, 11, 768)
        self.assertEqual(output.shape, lowercase_ )
        snake_case = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], lowercase_, atol=1E-4 ) )
332
'''simple docstring'''

from __future__ import annotations

from math import ceil, floor, sqrt


def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
    snake_case = [0]
    snake_case = 42
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    snake_case = 0
    # the area corresponding to the grid that gives the product closest to target
    snake_case = 0
    # an estimate of b, using the quadratic formula
    snake_case = 42
    # the largest integer less than b_estimate
    snake_case = 42
    # the largest integer less than b_estimate
    snake_case = 42
    # the triangle number corresponding to b_floor
    snake_case = 42
    # the triangle number corresponding to b_ceil
    snake_case = 42

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        snake_case = floor(A )
        snake_case = ceil(A )
        snake_case = triangle_numbers[b_floor]
        snake_case = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(target - best_product ):
            snake_case = triangle_b_first_guess * triangle_a
            snake_case = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(target - best_product ):
            snake_case = triangle_b_second_guess * triangle_a
            snake_case = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
332
1
'''simple docstring'''

import heapq as hq
import math
from collections.abc import Iterator


class lowerCamelCase :
    def __init__( self, lowercase_ ) -> Any:
        snake_case = str(id_ )
        snake_case = None
        snake_case = None
        snake_case = []
        snake_case = {}  # {vertex:distance}

    def __lt__( self, lowercase_ ) -> Optional[int]:
        return self.key < other.key

    def __repr__( self ) -> str:
        return self.id

    def _lowerCamelCase ( self, lowercase_ ) -> str:
        self.neighbors.append(lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]:
        snake_case = weight


def __magic_name__ ( A , A , A , A ) -> List[Any]:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , A )
    graph[b - 1].add_edge(graph[a - 1] , A )


def __magic_name__ ( A , A ) -> list:
    snake_case = []
    for u in graph:
        snake_case = math.inf
        snake_case = None

    snake_case = 0
    snake_case = graph[:]
    while q:
        snake_case = min(A )
        q.remove(A )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                snake_case = u
                snake_case = u.edges[v.id]
    for i in range(1 , len(A ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def __magic_name__ ( A , A ) -> Iterator[tuple]:
    for u in graph:
        snake_case = math.inf
        snake_case = None
    snake_case = 0

    snake_case = list(A )
    hq.heapify(A )

    while h:
        snake_case = hq.heappop(A )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                snake_case = u
                snake_case = u.edges[v.id]
                hq.heapify(A )

    for i in range(1 , len(A ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def __magic_name__ ( ) -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
332
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


lowerCAmelCase_ = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring'''

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

lowerCAmelCase_ = logging.get_logger(__name__)


@add_end_docstrings(__lowerCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, **lowercase_ ) -> List[str]:
        super().__init__(**lowercase_ )

        requires_backends(self, 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self, lowercase_, **lowercase_ ) -> Union[str, Any]:
        return super().__call__(lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, **lowercase_ ) -> Union[str, Any]:
        snake_case = {}
        if "candidate_labels" in kwargs:
            snake_case = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            snake_case = kwargs['hypothesis_template']

        return preprocess_params, {}, {}

    def _lowerCamelCase ( self, lowercase_, lowercase_=None, lowercase_="This is a photo of {}." ) -> Optional[int]:
        snake_case = load_image(lowercase_ )
        snake_case = self.image_processor(images=[image], return_tensors=self.framework )
        snake_case = candidate_labels
        snake_case = [hypothesis_template.format(lowercase_ ) for x in candidate_labels]
        snake_case = self.tokenizer(lowercase_, return_tensors=self.framework, padding=lowercase_ )
        snake_case = [text_inputs]
        return inputs

    def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]:
        snake_case = model_inputs.pop('candidate_labels' )
        snake_case = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0], lowercase_ ):
            snake_case = text_inputs[0]
        else:
            # Batching case.
            snake_case = text_inputs[0][0]

        snake_case = self.model(**lowercase_, **lowercase_ )

        snake_case = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def _lowerCamelCase ( self, lowercase_ ) -> int:
        snake_case = model_outputs.pop('candidate_labels' )
        snake_case = model_outputs['logits'][0]
        if self.framework == "pt":
            snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
            snake_case = probs.tolist()
            if not isinstance(lowercase_, lowercase_ ):
                snake_case = [scores]
        elif self.framework == "tf":
            snake_case = stable_softmax(lowercase_, axis=-1 )
            snake_case = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )

        snake_case = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(lowercase_, lowercase_ ), key=lambda lowercase_ : -x[0] )
        ]
        return result
332
'''simple docstring'''

import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class lowerCamelCase :
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    snake_case_ = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    snake_case_ = field(
        default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
    snake_case_ = field(
        default=64 , metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        } , )
    snake_case_ = field(
        default=30 , metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        } , )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    snake_case_ = field(
        default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    snake_case_ = field(
        default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    snake_case_ = field(
        default=0 , metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        } , )
    snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''train'''
    snake_case_ = '''dev'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = 42

    def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
        snake_case = args
        snake_case = is_language_sensitive
        snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(lowercase_, lowercase_ ):
            try:
                snake_case = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        snake_case = mode
        # Load data features from cache or dataset file
        snake_case = 'v2' if args.version_2_with_negative else 'v1'
        snake_case = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not args.overwrite_cache:
                snake_case = time.time()
                snake_case = torch.load(lowercase_ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case = self.old_features['features']
                snake_case = self.old_features.get('dataset', lowercase_ )
                snake_case = self.old_features.get('examples', lowercase_ )
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ' future run' )
            else:
                if mode == Split.dev:
                    snake_case = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case = self.processor.get_train_examples(args.data_dir )

                snake_case , snake_case = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )

                snake_case = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )

    def __len__( self ) -> Tuple:
        return len(self.features )

    def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        snake_case = self.features[i]

        snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
        snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
        snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
        snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
        snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
        snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )

        snake_case = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible} )
            if self.is_language_sensitive:
                inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )

        if self.mode == Split.train:
            snake_case = torch.tensor(feature.start_position, dtype=torch.long )
            snake_case = torch.tensor(feature.end_position, dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )

        return inputs
332
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.', lowercase_, )
        super().__init__(*lowercase_, **lowercase_ )
332
'''simple docstring'''

import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def __magic_name__ ( A , A , A ) -> Any:
    # Initialise PyTorch model
    snake_case = BertConfig.from_json_file(A )
    print(F'''Building PyTorch model from configuration: {config}''' )
    snake_case = BertForPreTraining(A )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(A , A , A )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , A )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = ['''input_features''', '''attention_mask''']

    def __init__( self, lowercase_=80, lowercase_=16000, lowercase_=80, lowercase_=0.0, lowercase_=True, lowercase_=True, lowercase_=True, **lowercase_, ) -> Optional[Any]:
        super().__init__(feature_size=lowercase_, sampling_rate=lowercase_, padding_value=lowercase_, **lowercase_ )
        snake_case = num_mel_bins
        snake_case = do_ceptral_normalize
        snake_case = normalize_means
        snake_case = normalize_vars
        snake_case = True

    def _lowerCamelCase ( self, lowercase_, ) -> np.ndarray:
        snake_case = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        snake_case = torch.from_numpy(lowercase_ ).unsqueeze(0 )
        snake_case = ta_kaldi.fbank(lowercase_, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def _lowerCamelCase ( lowercase_, lowercase_, lowercase_ = True, lowercase_ = True, lowercase_ = 0.0, ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            snake_case = x[:input_length].mean(axis=0 )
            snake_case = np.subtract(lowercase_, lowercase_ )
        if normalize_vars:
            snake_case = x[:input_length].std(axis=0 )
            snake_case = np.divide(lowercase_, lowercase_ )

        if input_length < x.shape[0]:
            snake_case = padding_value

        # make sure array is in float32
        snake_case = x.astype(np.floataa )

        return x

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[np.ndarray]:
        snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(lowercase_, lowercase_, self.normalize_means, self.normalize_vars, self.padding_value )
            for x, n in zip(lowercase_, lowercase_ )
        ]

    def __call__( self, lowercase_, lowercase_ = False, lowercase_ = None, lowercase_ = False, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        snake_case = isinstance(lowercase_, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        snake_case = is_batched_numpy or (
            isinstance(lowercase_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )

        if is_batched:
            snake_case = [np.asarray(lowercase_, dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowercase_, np.ndarray ):
            snake_case = np.asarray(lowercase_, dtype=np.floataa )
        elif isinstance(lowercase_, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            snake_case = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            snake_case = [raw_speech]

        # extract fbank features
        snake_case = [self._extract_fbank_features(lowercase_ ) for waveform in raw_speech]

        # convert into correct format for padding
        snake_case = BatchFeature({'input_features': features} )

        snake_case = self.pad(
            lowercase_, padding=lowercase_, max_length=lowercase_, truncation=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, **lowercase_, )

        # make sure list is in array format
        snake_case = padded_inputs.get('input_features' )
        if isinstance(input_features[0], lowercase_ ):
            snake_case = [np.asarray(lowercase_, dtype=np.floataa ) for feature in input_features]

        snake_case = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            snake_case = [np.asarray(lowercase_, dtype=np.intaa ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            snake_case = (
                np.array(lowercase_, dtype=np.intaa )
                if self._get_padding_strategies(lowercase_, max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            snake_case = self.normalize(
                padded_inputs['input_features'], attention_mask=lowercase_ )

        if return_tensors is not None:
            snake_case = padded_inputs.convert_to_tensors(lowercase_ )

        return padded_inputs
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
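A minimal sketch of the utterance-level cepstral mean and variance normalization that the speech feature extractor above applies; the array shape (100 frames by 80 mel bins) is an assumed example.

import numpy as np

feats = np.random.randn(100, 80).astype(np.float32)  # assumed (frames, num_mel_bins)
# subtract the per-utterance mean and divide by the per-utterance std over the time axis
normalized = (feats - feats.mean(axis=0)) / feats.std(axis=0)
assert normalized.shape == feats.shape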
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = tempfile.mkdtemp() snake_case = BlipImageProcessor() snake_case = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) snake_case = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) snake_case = InstructBlipProcessor(lowercase_, lowercase_, lowercase_ ) processor.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self, **lowercase_ ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).tokenizer def _lowerCamelCase ( self, **lowercase_ ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).image_processor def _lowerCamelCase ( self, **lowercase_ ) -> str: return AutoProcessor.from_pretrained(self.tmpdirname, **lowercase_ ).qformer_tokenizer def _lowerCamelCase ( self ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ) -> Tuple: snake_case = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] snake_case = [Image.fromarray(np.moveaxis(lowercase_, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = InstructBlipProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), ) processor.save_pretrained(self.tmpdirname ) snake_case = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' ) snake_case = self.get_image_processor(do_normalize=lowercase_, padding_value=1.0 ) snake_case = InstructBlipProcessor.from_pretrained( self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowercase_, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowercase_ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowercase_ ) self.assertIsInstance(processor.qformer_tokenizer, lowercase_ ) def _lowerCamelCase ( self ) -> Any: snake_case = self.get_image_processor() snake_case = self.get_tokenizer() snake_case = self.get_qformer_tokenizer() snake_case = InstructBlipProcessor( tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ ) snake_case = self.prepare_image_inputs() snake_case = image_processor(lowercase_, return_tensors='np' ) snake_case = processor(images=lowercase_, return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.get_image_processor() snake_case = self.get_tokenizer() snake_case = self.get_qformer_tokenizer() snake_case = InstructBlipProcessor( tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ ) snake_case = 'lower newer' snake_case = processor(text=lowercase_ ) snake_case = tokenizer(lowercase_, return_token_type_ids=lowercase_ ) snake_case = qformer_tokenizer(lowercase_, 
return_token_type_ids=lowercase_ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key], encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key] ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.get_image_processor() snake_case = self.get_tokenizer() snake_case = self.get_qformer_tokenizer() snake_case = InstructBlipProcessor( tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ ) snake_case = 'lower newer' snake_case = self.prepare_image_inputs() snake_case = processor(text=lowercase_, images=lowercase_ ) self.assertListEqual( list(inputs.keys() ), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def _lowerCamelCase ( self ) -> Any: snake_case = self.get_image_processor() snake_case = self.get_tokenizer() snake_case = self.get_qformer_tokenizer() snake_case = InstructBlipProcessor( tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ ) snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case = processor.batch_decode(lowercase_ ) snake_case = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_, lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.get_image_processor() snake_case = self.get_tokenizer() snake_case = self.get_qformer_tokenizer() snake_case = InstructBlipProcessor( tokenizer=lowercase_, image_processor=lowercase_, qformer_tokenizer=lowercase_ ) snake_case = 'lower newer' snake_case = self.prepare_image_inputs() snake_case = processor(text=lowercase_, images=lowercase_ ) self.assertListEqual( list(inputs.keys() ), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
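For context on what the InstructBlip processor tests above exercise, a hedged sketch of end-to-end usage, left as comments because it would download real weights; the checkpoint name is an assumption.

# Assumed checkpoint; commented out because from_pretrained fetches weights.
# from transformers import InstructBlipProcessor
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
# -> input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values,
#    the same keys the tests assert on.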
'''simple docstring''' from math import pi, sqrt def __magic_name__ ( A ) -> float: if num <= 0: raise ValueError('math domain error' ) if num > 171.5: raise OverflowError('math range error' ) elif num - int(A ) not in (0, 0.5): raise NotImplementedError('num must be an integer or a half-integer' ) elif num == 0.5: return sqrt(A ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def __magic_name__ ( ) -> None: assert gamma(0.5 ) == sqrt(A ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() lowerCAmelCase_ = 1.0 while num: lowerCAmelCase_ = float(input("Gamma of: ")) print(f"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
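An iterative cross-check of the recursive half-integer gamma above, a minimal sketch built only on the recurrence gamma(x + 1) = x * gamma(x) anchored at gamma(1) = 1 and gamma(0.5) = sqrt(pi).

from math import pi, sqrt

def gamma_iterative(n: float) -> float:
    # valid for positive integers and half-integers only, like the version above
    result, x = (sqrt(pi), 0.5) if n % 1 == 0.5 else (1.0, 1.0)
    while x < n:
        result *= x
        x += 1.0
    return result

assert gamma_iterative(5) == 24.0                        # gamma(5) = 4!
assert abs(gamma_iterative(1.5) - sqrt(pi) / 2) < 1e-12  # gamma(3/2) = sqrt(pi)/2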
'''simple docstring''' from math import pi, sqrt, tan def __magic_name__ ( A ) -> float: if side_length < 0: raise ValueError('surface_area_cube() only accepts non-negative values' ) return 6 * side_length**2 def __magic_name__ ( A , A , A ) -> float: if length < 0 or breadth < 0 or height < 0: raise ValueError('surface_area_cuboid() only accepts non-negative values' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __magic_name__ ( A ) -> float: if radius < 0: raise ValueError('surface_area_sphere() only accepts non-negative values' ) return 4 * pi * radius**2 def __magic_name__ ( A ) -> float: if radius < 0: raise ValueError('surface_area_hemisphere() only accepts non-negative values' ) return 3 * pi * radius**2 def __magic_name__ ( A , A ) -> float: if radius < 0 or height < 0: raise ValueError('surface_area_cone() only accepts non-negative values' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __magic_name__ ( A , A , A ) -> float: if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( 'surface_area_conical_frustum() only accepts non-negative values' ) snake_case = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __magic_name__ ( A , A ) -> float: if radius < 0 or height < 0: raise ValueError('surface_area_cylinder() only accepts non-negative values' ) return 2 * pi * radius * (height + radius) def __magic_name__ ( A , A ) -> float: if torus_radius < 0 or tube_radius < 0: raise ValueError('surface_area_torus() only accepts non-negative values' ) if torus_radius < tube_radius: raise ValueError( 'surface_area_torus() does not support spindle or self intersecting tori' ) return 4 * pow(A , 2 ) * torus_radius * tube_radius def __magic_name__ ( A , A ) -> float: if length < 0 or width < 0: raise ValueError('area_rectangle() only accepts non-negative values' ) return length * width def __magic_name__ ( A ) -> float: if side_length < 0: raise ValueError('area_square() only accepts non-negative values' ) return side_length**2 def __magic_name__ ( A , A ) -> float: if base < 0 or height < 0: raise ValueError('area_triangle() only accepts non-negative values' ) return (base * height) / 2 def __magic_name__ ( A , A , A ) -> float: if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('area_triangle_three_sides() only accepts non-negative values' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('Given three sides do not form a triangle' ) snake_case = (sidea + sidea + sidea) / 2 snake_case = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __magic_name__ ( A , A ) -> float: if base < 0 or height < 0: raise ValueError('area_parallelogram() only accepts non-negative values' ) return base * height def __magic_name__ ( A , A , A ) -> float: if basea < 0 or basea < 0 or height < 0: raise ValueError('area_trapezium() only accepts non-negative values' ) return 1 / 2 * (basea + basea) * height def __magic_name__ ( A ) -> float: if radius < 0: raise ValueError('area_circle() only accepts non-negative values' ) return pi * radius**2 def __magic_name__ ( A , A ) -> float: if radius_x < 0 or radius_y < 0: raise ValueError('area_ellipse() only accepts non-negative values' ) return pi * radius_x * radius_y def __magic_name__ ( A , A ) -> float: if diagonal_a < 0 or diagonal_a < 0: raise ValueError('area_rhombus() only accepts non-negative values' ) 
return 1 / 2 * diagonal_a * diagonal_a def __magic_name__ ( A , A ) -> float: if not isinstance(A , A ) or sides < 3: raise ValueError( 'area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides' ) elif length < 0: raise ValueError( 'area_reg_polygon() only accepts non-negative values as \ length of a side' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f"Rectangle: {area_rectangle(1_0, 2_0) = }") print(f"Square: {area_square(1_0) = }") print(f"Triangle: {area_triangle(1_0, 1_0) = }") print(f"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(f"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(f"Rhombus: {area_rhombus(1_0, 2_0) = }") print(f"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(f"Circle: {area_circle(2_0) = }") print(f"Ellipse: {area_ellipse(1_0, 2_0) = }") print("\nSurface Areas of various geometric shapes: \n") print(f"Cube: {surface_area_cube(2_0) = }") print(f"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(f"Sphere: {surface_area_sphere(2_0) = }") print(f"Hemisphere: {surface_area_hemisphere(2_0) = }") print(f"Cone: {surface_area_cone(1_0, 2_0) = }") print(f"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(f"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(f"Torus: {surface_area_torus(2_0, 1_0) = }") print(f"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(f"Square: {area_reg_polygon(4, 1_0) = }") print(f"Regular Pentagon: {area_reg_polygon(5, 1_0) = }")
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
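A worked check of the Heron's-formula branch in the geometry module above, using the 5-12-13 right triangle that its demo block prints.

from math import sqrt

s = (5 + 12 + 13) / 2                           # semi-perimeter = 15.0
area = sqrt(s * (s - 5) * (s - 12) * (s - 13))  # sqrt(15 * 10 * 3 * 2) = sqrt(900)
assert area == 30.0                             # agrees with (5 * 12) / 2 for a right triangle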
'''simple docstring''' import os from datetime import datetime as dt from github import Github lowerCAmelCase_ = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def __magic_name__ ( ) -> Union[str, Any]: snake_case = Github(os.environ['GITHUB_TOKEN'] ) snake_case = g.get_repo('huggingface/diffusers' ) snake_case = repo.get_issues(state='open' ) for issue in open_issues: snake_case = sorted(issue.get_comments() , key=lambda A : i.created_at , reverse=A ) snake_case = comments[0] if len(A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 2_3 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
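A standard-library cross-check for the backtracking permutation generator above: a length-4 input must yield 4! = 24 orderings.

from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24
assert sorted(permutations("AB")) == [("A", "B"), ("B", "A")]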
'''simple docstring''' def __magic_name__ ( A ) -> Optional[Any]: snake_case = len(A ) while cur > 1: # Find the maximum number in arr snake_case = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi snake_case = arr[mi::-1] + arr[mi + 1 : len(A )] # Reverse whole list snake_case = arr[cur - 1 :: -1] + arr[cur : len(A )] cur -= 1 return arr if __name__ == "__main__": lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip() lowerCAmelCase_ = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
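The flip primitive that the pancake sort above relies on, isolated as a standalone sketch with a small check.

def flip(arr: list, k: int) -> list:
    # reverse the prefix arr[0..k], i.e. a spatula flip under element k
    return arr[k::-1] + arr[k + 1 :]

assert flip([3, 1, 4, 2], 2) == [4, 1, 3, 2]  # brings the max of the first three to the front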
'''simple docstring''' import heapq import sys import numpy as np lowerCAmelCase_ = tuple[int, int] class lowerCamelCase : def __init__( self ) -> Any: snake_case = [] snake_case = set() def _lowerCamelCase ( self ) -> Optional[Any]: if not self.empty(): return self.elements[0][0] else: return float('inf' ) def _lowerCamelCase ( self ) -> Any: return len(self.elements ) == 0 def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]: if item not in self.set: heapq.heappush(self.elements, (priority, item) ) self.set.add(lowercase_ ) else: # update # print("update", item) snake_case = [] ((snake_case) , (snake_case)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((snake_case) , (snake_case)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements, (pro, xxx) ) def _lowerCamelCase ( self, lowercase_ ) -> Dict: if item in self.set: self.set.remove(lowercase_ ) snake_case = [] ((snake_case) , (snake_case)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((snake_case) , (snake_case)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements, (prito, yyy) ) def _lowerCamelCase ( self ) -> Tuple: return self.elements[0][1] def _lowerCamelCase ( self ) -> Dict: ((snake_case) , (snake_case)) = heapq.heappop(self.elements ) self.set.remove(lowercase_ ) return (priority, item) def __magic_name__ ( A , A ) -> Optional[Any]: # euclidean distance snake_case = np.array(A ) snake_case = np.array(A ) return np.linalg.norm(a - b ) def __magic_name__ ( A , A ) -> str: # integer division by time variable return consistent_heuristic(A , A ) // t def __magic_name__ ( A , A ) -> List[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def __magic_name__ ( A , A , A , A ) -> Dict: snake_case = g_function[start] + Wa * heuristics[i](A , A ) return ans def __magic_name__ ( A , A , A ) -> Optional[Any]: snake_case = np.chararray((n, n) ) for i in range(A ): for j in range(A ): snake_case = '*' for i in range(A ): for j in range(A ): if (j, (n - 1) - i) in blocks: snake_case = '#' snake_case = '-' snake_case = back_pointer[goal] while x != start: ((snake_case) , (snake_case)) = x # print(x) snake_case = '-' snake_case = back_pointer[x] snake_case = '-' for i in range(A ): for j in range(A ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) snake_case = back_pointer[goal] while x != start: print(A , end=' ' ) snake_case = back_pointer[x] print(A ) sys.exit() def __magic_name__ ( A ) -> Dict: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def __magic_name__ ( A , A , A , A , A , A , A , A , ) -> int: for itera in range(A ): open_list[itera].remove_element(A ) # print("s", s) # print("j", j) ((snake_case) , (snake_case)) = s snake_case = (x - 1, y) snake_case = (x + 1, y) snake_case = (x, y + 1) snake_case = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(A ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(A ) snake_case = -1 snake_case = float('inf' ) if valid(A ) and g_function[neighbours] > g_function[s] + 1: snake_case = g_function[s] + 1 snake_case = s if neighbours not in 
close_list_anchor: open_list[0].put(A , key(A , 0 , A , A ) ) if neighbours not in close_list_inad: for var in range(1 , A ): if key(A , A , A , A ) <= Wa * key( A , 0 , A , A ): open_list[j].put( A , key(A , A , A , A ) ) def __magic_name__ ( ) -> List[Any]: snake_case = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list lowerCAmelCase_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} lowerCAmelCase_ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (1_0, 1), (1_1, 1), (1_2, 1), (1_3, 1), (1_4, 1), (1_5, 1), (1_6, 1), (1_7, 1), (1_8, 1), (1_9, 1), ] lowerCAmelCase_ = make_common_ground() lowerCAmelCase_ = blocks_blk # hyper parameters lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2_0 lowerCAmelCase_ = 3 # one consistent and two other inconsistent # start and end destination lowerCAmelCase_ = (0, 0) lowerCAmelCase_ = (n - 1, n - 1) lowerCAmelCase_ = 1 def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = {start: 0, goal: float('inf' )} snake_case = {start: -1, goal: -1} snake_case = [] snake_case = set() for i in range(A ): open_list.append(PriorityQueue() ) open_list[i].put(A , key(A , A , A , A ) ) snake_case = [] snake_case = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , A ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(A , A , A ) else: snake_case , snake_case = open_list[i].top_show() visited.add(A ) expand_state( A , A , A , A , A , A , A , A , ) close_list_inad.append(A ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(A , A , A ) else: snake_case = open_list[0].top_show() visited.add(A ) expand_state( A , 0 , A , A , A , A , A , A , ) close_list_anchor.append(A ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(A ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' 
) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
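The priority that the anchored multi-heuristic A* above pushes into its queues, shown as a standalone computation, f_i(s) = g(s) + W1 * h_i(s), with the Euclidean anchor heuristic and the grid corners that script uses as start and goal.

import numpy as np

start, goal, W1 = (0, 0), (19, 19), 1
g = 0                                                        # cost-so-far at the start node
h = float(np.linalg.norm(np.array(start) - np.array(goal)))  # Euclidean anchor heuristic
f = g + W1 * h                                               # sqrt(722) ~ 26.87
assert abs(f - 26.87) < 0.01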
'''simple docstring''' def __magic_name__ ( A ) -> bool: snake_case = (1 + 2_4 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def __magic_name__ ( A = 5_0_0_0 ) -> int: snake_case = [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): snake_case = pentagonal_nums[j] snake_case = pentagonal_i + pentagonal_j snake_case = pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(f"{solution() = }")
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) 
attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', 
F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
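A round-trip check of the pentagonal-number machinery above: the forward formula P(n) = n(3n - 1)/2 and the inverse test that the solver's is-pentagonal predicate encodes.

def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2

assert [pentagonal(n) for n in range(1, 6)] == [1, 5, 12, 22, 35]
root = (1 + 24 * 22) ** 0.5        # 529 ** 0.5 == 23.0
assert ((1 + root) / 6) % 1 == 0   # so 22 is pentagonal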
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A ) -> int: snake_case = R'\w+[.]\d+' snake_case = re.findall(A , A ) for pat in pats: snake_case = key.replace(A , '_'.join(pat.split('.' ) ) ) return key def __magic_name__ ( A , A , A ) -> Any: snake_case = pt_tuple_key[:-1] + ('scale',) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): snake_case = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: snake_case = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: snake_case = pt_tuple_key[:-1] + ('embedding',) return renamed_pt_tuple_key, pt_tensor # conv layer snake_case = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer snake_case = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight": snake_case = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight snake_case = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias snake_case = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __magic_name__ ( A , A , A=4_2 ) -> str: # Step 1: Convert pytorch tensor to numpy snake_case = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params snake_case = flax_model.init_weights(PRNGKey(A ) ) snake_case = flatten_dict(A ) snake_case = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): snake_case = rename_key(A ) snake_case = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters snake_case , snake_case = rename_key_and_reshape_tensor(A , A , A ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown snake_case = jnp.asarray(A ) return unflatten_dict(A )
332
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
1
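The two tensor-layout rules that the PyTorch-to-Flax weight renamer above applies, isolated as a sketch; the example shapes are assumptions chosen so each axis is distinguishable.

import numpy as np

pt_linear = np.zeros((8, 4))       # torch Linear weight: (out_features, in_features)
assert pt_linear.T.shape == (4, 8) # Flax Dense kernel: (in_features, out_features)

pt_conv = np.zeros((16, 4, 3, 3))  # torch Conv2d weight: (out_ch, in_ch, kH, kW)
assert pt_conv.transpose(2, 3, 1, 0).shape == (3, 3, 4, 16)  # Flax Conv: (kH, kW, in_ch, out_ch)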
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["XLNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["XLNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
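The XLNet `__init__` above follows the lazy-import pattern used across the library: an `_import_structure` dict maps submodules to their exported names, and nothing heavy is imported until an attribute is actually requested. A minimal sketch of the same idea using only the standard library (the `_heavy` submodule and `HeavyModel` symbol are hypothetical, not from the source):

# Minimal lazy package __init__ via PEP 562 module-level __getattr__.
# The submodule name "_heavy" and symbol "HeavyModel" are made up for illustration.
import importlib

_import_structure = {"_heavy": ["HeavyModel"]}

# Invert the mapping: exported symbol -> defining submodule.
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}

def __getattr__(name):
    if name in _symbol_to_module:
        submodule = importlib.import_module("." + _symbol_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")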
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A = None , A = None , A = False , ) -> tuple[int, float, str]: snake_case = cipher_alphabet or [chr(A ) for i in range(9_7 , 1_2_3 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the English language (how often they show up) snake_case = { 'a': 0.08_497, 'b': 0.01_492, 'c': 0.02_202, 'd': 0.04_253, 'e': 0.11_162, 'f': 0.02_228, 'g': 0.02_015, 'h': 0.06_094, 'i': 0.07_546, 'j': 0.00_153, 'k': 0.01_292, 'l': 0.04_025, 'm': 0.02_406, 'n': 0.06_749, 'o': 0.07_507, 'p': 0.01_929, 'q': 0.00_095, 'r': 0.07_587, 's': 0.06_327, 't': 0.09_356, 'u': 0.02_758, 'v': 0.00_978, 'w': 0.02_560, 'x': 0.00_150, 'y': 0.01_994, 'z': 0.00_077, } else: # Custom frequencies dictionary snake_case = frequencies_dict if not case_sensitive: snake_case = ciphertext.lower() # Chi squared statistic values snake_case = {} # cycle through all of the shifts for shift in range(len(A ) ): snake_case = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet snake_case = (alphabet_letters.index(letter.lower() ) - shift) % len( A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter snake_case = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: snake_case = letter.lower() if letter in frequencies: # Get the number of times the letter occurs in the message snake_case = decrypted_with_shift.lower().count(A ) # Get the expected number of times the letter should appear based # on letter frequencies snake_case = frequencies[letter] * occurrences # Complete the chi squared statistic formula snake_case = ((occurrences - expected) ** 2) / expected # Add the letter's contribution to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the number of times the letter occurs in the message snake_case = decrypted_with_shift.count(A ) # Get the expected number of times the letter should appear based # on letter frequencies snake_case = frequencies[letter] * occurrences # Complete the chi squared statistic formula snake_case = ((occurrences - expected) ** 2) / expected # Add the letter's contribution to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary snake_case = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(A ) -> tuple[float, str]: return chi_squared_statistic_values[key] snake_case = min( A , key=A , ) # Get all the data from the most likely cipher (key, decoded message) ( ( snake_case ) , ( snake_case ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
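The chi-squared decoder above (its name obfuscated to `__magic_name__`) tries every shift, decrypts, and scores each candidate plaintext against English letter frequencies; the shift with the smallest statistic wins. A usage sketch, assuming the routine is exposed under the descriptive name `decrypt_caesar_with_chi_squared` (that name is an assumption, not from the source):

# Hypothetical usage of the chi-squared Caesar decoder above.
ciphertext = "dro aesmu lbygx pyh"  # "the quick brown fox" shifted by 10
shift, chi2, plaintext = decrypt_caesar_with_chi_squared(ciphertext)
print(shift)      # expected: 10
print(plaintext)  # expected: "the quick brown fox"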
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowerCAmelCase_ = "pt" elif is_tf_available(): lowerCAmelCase_ = "tf" else: lowerCAmelCase_ = "jax" class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = PerceiverTokenizer snake_case_ = False def _lowerCamelCase ( self ) -> Any: super().setUp() snake_case = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowerCamelCase ( self ) -> Dict: return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def _lowerCamelCase ( self, **lowercase_ ) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=False, lowercase_=20, lowercase_=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. snake_case = [] for i in range(len(lowercase_ ) ): try: snake_case = tokenizer.decode([i], clean_up_tokenization_spaces=lowercase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) snake_case = list(filter(lambda lowercase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowercase_ ) ) snake_case = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowercase_ ), lowercase_ ) ) if max_length is not None and len(lowercase_ ) > max_length: snake_case = toks[:max_length] if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0: while len(lowercase_ ) < min_length: snake_case = toks + toks # toks_str = [t[1] for t in toks] snake_case = [t[0] for t in toks] # Ensure consistency snake_case = tokenizer.decode(lowercase_, clean_up_tokenization_spaces=lowercase_ ) if " " not in output_txt and len(lowercase_ ) > 1: snake_case = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowercase_ ) + ' ' + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowercase_ ) ) if with_prefix_space: snake_case = ' ' + output_txt snake_case = tokenizer.encode(lowercase_, add_special_tokens=lowercase_ ) return output_txt, output_ids def _lowerCamelCase ( self ) -> List[str]: snake_case = self.perceiver_tokenizer snake_case = 'Unicode €.' 
snake_case = tokenizer(lowercase_ ) snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'], lowercase_ ) # decoding snake_case = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_, '[CLS]Unicode €.[SEP]' ) snake_case = tokenizer('e è é ê ë' ) snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'], lowercase_ ) # decoding snake_case = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_, '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.perceiver_tokenizer snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on snake_case = tokenizer(lowercase_, padding=lowercase_, return_tensors=lowercase_ ) self.assertIsInstance(lowercase_, lowercase_ ) if FRAMEWORK != "jax": snake_case = list(batch.input_ids.numpy()[0] ) else: snake_case = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase_, lowercase_ ) self.assertEqual((2, 38), batch.input_ids.shape ) self.assertEqual((2, 38), batch.attention_mask.shape ) def _lowerCamelCase ( self ) -> str: snake_case = self.perceiver_tokenizer snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] snake_case = tokenizer(lowercase_, padding=lowercase_, return_tensors=lowercase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids', lowercase_ ) self.assertIn('attention_mask', lowercase_ ) self.assertNotIn('decoder_input_ids', lowercase_ ) self.assertNotIn('decoder_attention_mask', lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.perceiver_tokenizer snake_case = [ 'Summary of the text.', 'Another summary.', ] snake_case = tokenizer( text_target=lowercase_, max_length=32, padding='max_length', truncation=lowercase_, return_tensors=lowercase_ ) self.assertEqual(32, targets['input_ids'].shape[1] ) def _lowerCamelCase ( self ) -> str: # safety check on max_len default value so we are sure the test works snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length, 42 ) # Now let's start the test snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc snake_case = tempfile.mkdtemp() snake_case = ' He is very happy, UNwant\u00E9d,running' snake_case = tokenizer.encode(lowercase_, add_special_tokens=lowercase_ ) tokenizer.save_pretrained(lowercase_ ) snake_case = tokenizer.__class__.from_pretrained(lowercase_ ) snake_case = after_tokenizer.encode(lowercase_, add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_, lowercase_ ) shutil.rmtree(lowercase_ ) snake_case = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc snake_case = tempfile.mkdtemp() snake_case = ' He is very happy, 
UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) snake_case = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) snake_case = tokenizer.encode(lowercase_, add_special_tokens=lowercase_ ) tokenizer.save_pretrained(lowercase_ ) snake_case = tokenizer.__class__.from_pretrained(lowercase_ ) snake_case = after_tokenizer.encode(lowercase_, add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_, lowercase_ ) self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length, 42 ) snake_case = tokenizer.__class__.from_pretrained(lowercase_, model_max_length=43 ) self.assertEqual(tokenizer.model_max_length, 43 ) shutil.rmtree(lowercase_ ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file: snake_case = json.load(lowercase_ ) with open(os.path.join(lowercase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file: snake_case = json.load(lowercase_ ) snake_case = [F'''<extra_id_{i}>''' for i in range(125 )] snake_case = added_tokens_extra_ids + [ 'an_additional_special_token' ] snake_case = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(lowercase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(lowercase_, lowercase_ ) with open(os.path.join(lowercase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile: json.dump(lowercase_, lowercase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case = tokenizer_class.from_pretrained( lowercase_, ) self.assertIn( 'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowercase_ )] snake_case = tokenizer_class.from_pretrained( lowercase_, additional_special_tokens=lowercase_, ) self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), ) def _lowerCamelCase ( self ) -> Any: snake_case = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ), '�' ) def _lowerCamelCase ( self ) -> str: pass def _lowerCamelCase ( self ) -> Optional[Any]: pass def _lowerCamelCase ( self ) -> List[Any]: pass def _lowerCamelCase ( self ) -> Optional[Any]: pass def _lowerCamelCase ( self ) -> Optional[Any]: # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens snake_case = self.get_tokenizers(fast=lowercase_, do_lower_case=lowercase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] snake_case = tokenizer.convert_tokens_to_string(lowercase_ ) self.assertIsInstance(lowercase_, lowercase_ )
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - 
expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
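The expected ids in the Perceiver tokenizer test above are consistent with a plain byte-level scheme: a few reserved special ids (`[CLS]` = 4, `[SEP]` = 5) plus each UTF-8 byte shifted by a fixed offset of 6. The offset is reverse-engineered from the test fixtures rather than read from the tokenizer source, so treat it as an inference:

# Byte-level encoding sketch that reproduces the Perceiver test fixtures above.
CLS, SEP, OFFSET = 4, 5, 6  # values inferred from the expected ids

def encode(text: str) -> list[int]:
    return [CLS] + [b + OFFSET for b in text.encode("utf-8")] + [SEP]

# Matches the first expected sequence in the test ("€" is three UTF-8 bytes).
assert encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]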
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar("T") lowerCAmelCase_ = TypeVar("U") class lowerCamelCase ( Generic[T, U] ): def __init__( self, lowercase_, lowercase_ ) -> Tuple: snake_case = key snake_case = val snake_case = None snake_case = None def __repr__( self ) -> str: return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase ( Generic[T, U] ): def __init__( self ) -> None: snake_case = DoubleLinkedListNode(lowercase_, lowercase_ ) snake_case = DoubleLinkedListNode(lowercase_, lowercase_ ) snake_case , snake_case = self.rear, self.head def __repr__( self ) -> str: snake_case = ['DoubleLinkedList'] snake_case = self.head while node.next is not None: rep.append(str(lowercase_ ) ) snake_case = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> None: snake_case = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None snake_case = node snake_case = previous snake_case = node snake_case = self.rear def _lowerCamelCase ( self, lowercase_ ) -> DoubleLinkedListNode[T, U] | None: if node.prev is None or node.next is None: return None snake_case = node.next snake_case = node.prev snake_case = None snake_case = None return node class lowerCamelCase ( Generic[T, U] ): snake_case_ = {} def __init__( self, lowercase_ ) -> Dict: snake_case = DoubleLinkedList() snake_case = capacity snake_case = 0 snake_case = 0 snake_case = 0 snake_case = {} def __repr__( self ) -> str: return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self, lowercase_ ) -> bool: return key in self.cache def _lowerCamelCase ( self, lowercase_ ) -> U | None: # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 snake_case = self.cache[key] snake_case = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowercase_ ) return node.val self.miss += 1 return None def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> None: if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity snake_case = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowercase_ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 snake_case = DoubleLinkedListNode(lowercase_, lowercase_ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value snake_case = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list snake_case = value self.list.add(lowercase_ ) @classmethod def _lowerCamelCase ( cls, lowercase_ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: def cache_decorator_inner(lowercase_ ) -> Callable[..., U]: def cache_decorator_wrapper(*lowercase_ ) -> U: if func not in cls.decorator_function_to_instance_map: snake_case = LRUCache(lowercase_ ) snake_case = cls.decorator_function_to_instance_map[func].get(args[0] ) 
if result is None: snake_case = func(*lowercase_ ) cls.decorator_function_to_instance_map[func].put(args[0], lowercase_ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowercase_, 'cache_info', lowercase_ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
332
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''note_seq'''] def __init__( self, *lowercase_, **lowercase_ ) -> str: requires_backends(self, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: requires_backends(cls, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: requires_backends(cls, ['note_seq'] )
332
1
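The `LRUCache` above also works as a memoization decorator through its classmethod (note the wrapper keys the cache on `args[0]` only, so it suits single-argument functions). A usage sketch, assuming the obfuscated classmethod carries its upstream name `decorator` and the class is importable as `LRUCache`:

# Sketch: memoizing a one-argument function with the LRUCache above.
@LRUCache.decorator(100)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, with linear rather than exponential work
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)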
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class lowerCamelCase ( unittest.TestCase ): def __init__( self, lowercase_, lowercase_ = True, lowercase_ = None, lowercase_ = 32, lowercase_ = True, lowercase_ = 1 / 255, lowercase_ = True, lowercase_ = True, lowercase_ = [0.48_145_466, 0.4_578_275, 0.40_821_073], lowercase_ = [0.26_862_954, 0.26_130_258, 0.27_577_711], lowercase_ = True, lowercase_=7, lowercase_=30, lowercase_=400, lowercase_=3, ) -> Dict: snake_case = parent snake_case = do_resize snake_case = size if size is not None else {'shortest_edge': 288} snake_case = size_divisor snake_case = do_rescale snake_case = rescale_factor snake_case = do_normalize snake_case = do_center_crop snake_case = image_mean snake_case = image_std snake_case = do_pad snake_case = batch_size snake_case = num_channels snake_case = min_resolution snake_case = max_resolution def _lowerCamelCase ( self ) -> Union[str, Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _lowerCamelCase ( self, lowercase_, lowercase_=False ) -> Optional[int]: if not batched: snake_case = self.size['shortest_edge'] snake_case = image_inputs[0] if isinstance(lowercase_, Image.Image ): snake_case , snake_case = image.size else: snake_case , snake_case = image.shape[1], image.shape[2] snake_case = size / min(lowercase_, lowercase_ ) if h < w: snake_case , snake_case = size, scale * w else: snake_case , snake_case = scale * h, size snake_case = int((1333 / 800) * size ) if max(lowercase_, lowercase_ ) > max_size: snake_case = max_size / max(lowercase_, lowercase_ ) snake_case = newh * scale snake_case = neww * scale snake_case , snake_case = int(newh + 0.5 ), int(neww + 0.5 ) snake_case , snake_case = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case = [] for image in image_inputs: snake_case , snake_case = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case = max(lowercase_, key=lambda lowercase_ : item[0] )[0] snake_case = max(lowercase_, key=lambda lowercase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = BridgeTowerImageProcessingTester(self ) @property def _lowerCamelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ) -> Dict: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_, 'image_mean' ) ) self.assertTrue(hasattr(lowercase_, 'image_std' ) ) self.assertTrue(hasattr(lowercase_, 'do_normalize' ) ) self.assertTrue(hasattr(lowercase_, 'do_resize' ) ) self.assertTrue(hasattr(lowercase_, 'size' ) ) self.assertTrue(hasattr(lowercase_, 'size_divisor' ) ) def _lowerCamelCase ( self ) -> Tuple: 
pass def _lowerCamelCase ( self ) -> Optional[int]: # Initialize image processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, Image.Image ) # Test not batched input snake_case = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def _lowerCamelCase ( self ) -> Tuple: # Initialize image processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def _lowerCamelCase ( self ) -> Dict: # Initialize image processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values snake_case , snake_case = self.image_processor_tester.get_expected_values(lowercase_, batched=lowercase_ ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
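`get_expected_values` in the BridgeTower test above encodes the resize rule under test: scale so the shortest edge equals `size`, cap the longer edge at `int(1333 / 800 * size)`, then floor both sides to a multiple of `size_divisor`. A standalone re-derivation of that computation (a sketch of the test helper's logic, not the library's own resize code):

# Re-derivation of the expected post-resize dimensions from the test above.
def expected_size(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple[int, int]:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)            # cap on the longer edge
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)  # round half up
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(400, 640))  # (288, 448): shortest edge -> 288, both sides snapped to /32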
'''simple docstring''' import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case_ = '''ssube/stable-diffusion-x4-upscaler-onnx''' def _lowerCamelCase ( self, lowercase_=0 ) -> List[str]: snake_case = floats_tensor((1, 3, 128, 128), rng=random.Random(lowercase_ ) ) snake_case = torch.manual_seed(lowercase_ ) snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _lowerCamelCase ( self ) -> List[str]: snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = self.get_dummy_inputs() snake_case = pipe(**lowercase_ ).images snake_case = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) snake_case = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowerCamelCase ( self ) -> Tuple: snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' ) snake_case = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = self.get_dummy_inputs() snake_case = pipe(**lowercase_ ).images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowerCamelCase ( self ) -> Dict: snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' ) snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = self.get_dummy_inputs() snake_case = pipe(**lowercase_ ).images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowerCamelCase ( self ) -> str: snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' ) snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = self.get_dummy_inputs() snake_case = pipe(**lowercase_ ).images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 
0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' ) snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = self.get_dummy_inputs() snake_case = pipe(**lowercase_ ).images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): @property def _lowerCamelCase ( self ) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = ort.SessionOptions() snake_case = False return options def _lowerCamelCase ( self ) -> List[Any]: snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case = init_image.resize((128, 128) ) # using the PNDM scheduler by default snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'A fantasy landscape, trending on artstation' snake_case = torch.manual_seed(0 ) snake_case = pipe( prompt=lowercase_, image=lowercase_, guidance_scale=7.5, num_inference_steps=10, generator=lowercase_, output_type='np', ) snake_case = output.images snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) snake_case = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowerCamelCase ( self ) -> Optional[int]: snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case = init_image.resize((128, 128) ) snake_case = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler' ) snake_case = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lowercase_, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'A fantasy landscape, trending on artstation' snake_case = torch.manual_seed(0 ) snake_case = pipe( prompt=lowercase_, image=lowercase_, guidance_scale=7.5, num_inference_steps=20, generator=lowercase_, output_type='np', ) snake_case = output.images snake_case = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) snake_case = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Polynomial Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polynomial() # Predicting a new result with Polynomial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
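The regression script above fits `PolynomialFeatures(degree=4)` followed by `LinearRegression`, which is exactly what a scikit-learn `Pipeline` bundles into one estimator. An equivalent sketch on synthetic data (the data below is invented for illustration):

# Equivalent degree-4 polynomial fit expressed as a Pipeline, on synthetic data.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 11).reshape(-1, 1)  # position levels 1..10
y = 0.5 * X.ravel() ** 4 + np.random.default_rng(0).normal(0, 5, size=10)

model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
model.fit(X, y)
print(model.predict([[5.5]]))  # same prediction call shape as the script above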
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: 
self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
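The compression filesystems above all reduce to one trick: open the file through a decompressing reader and present the result as a single uncompressed entry. fsspec wires that into protocol URLs; the core read-side behaviour can be sketched with only the standard library (chained fsspec URLs are deliberately avoided here to keep the example dependency-free):

# Standard-library sketch of the read-side behaviour the classes above provide.
import bz2
import gzip
import lzma

_OPENERS = {".gz": gzip.open, ".bz2": bz2.open, ".xz": lzma.open}

def read_compressed(path: str) -> bytes:
    # Return the decompressed contents of a single compressed file.
    for ext, opener in _OPENERS.items():
        if path.endswith(ext):
            with opener(path, "rb") as f:  # decompresses transparently
                return f.read()
    raise ValueError(f"unsupported extension for {path!r}")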
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "caidas/swin2sr-classicalsr-x2-64": ( "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" ), } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''swin2sr''' snake_case_ = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self, lowercase_=64, lowercase_=1, lowercase_=3, lowercase_=180, lowercase_=[6, 6, 6, 6, 6, 6], lowercase_=[6, 6, 6, 6, 6, 6], lowercase_=8, lowercase_=2.0, lowercase_=True, lowercase_=0.0, lowercase_=0.0, lowercase_=0.1, lowercase_="gelu", lowercase_=False, lowercase_=0.02, lowercase_=1E-5, lowercase_=2, lowercase_=1.0, lowercase_="1conv", lowercase_="pixelshuffle", **lowercase_, ) -> Union[str, Any]: super().__init__(**lowercase_ ) snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = embed_dim snake_case = depths snake_case = len(lowercase_ ) snake_case = num_heads snake_case = window_size snake_case = mlp_ratio snake_case = qkv_bias snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = drop_path_rate snake_case = hidden_act snake_case = use_absolute_embeddings snake_case = layer_norm_eps snake_case = initializer_range snake_case = upscale snake_case = img_range snake_case = resi_connection snake_case = upsampler
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
1
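`find_max` above is a textbook divide-and-conquer maximum: split the range at the midpoint, recurse on each half, and keep the larger result. A quick usage sketch:

# Usage sketch for the divide-and-conquer find_max above.
nums = [3, 7, -2, 9, 4]
assert find_max(nums, 0, len(nums) - 1) == 9  # maximum over the whole range
assert find_max(nums, 0, 2) == 7              # maximum over the sub-range [3, 7, -2]
assert find_max(nums, -len(nums), -1) == 9    # the bounds check permits negative indices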
'''simple docstring''' def __magic_name__ ( A , A ) -> float: if b == 0: return 1 if (b % 2) == 0: return actual_power(A , int(b / 2 ) ) * actual_power(A , int(b / 2 ) ) else: return a * actual_power(A , int(b / 2 ) ) * actual_power(A , int(b / 2 ) ) def __magic_name__ ( A , A ) -> float: if b < 0: return 1 / actual_power(A , A ) return actual_power(A , A ) if __name__ == "__main__": print(power(-2, -3))
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
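The `actual_power`/`power` pair at the top of the record above implements recursive squaring, but note that each step issues the half-power call twice, so the total work is linear in the exponent even though the recursion depth is logarithmic. A sketch that computes the half once, restoring true O(log b) work while agreeing with the original on its documented case:

# Single-recursion variant of the exponentiation-by-squaring routine above.
def fast_power(a: float, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half-power once
    return half * half * (a if b % 2 else 1)

assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125  # matches print(power(-2, -3)) above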
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase_ = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the smallest integer greater than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
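The quadratic-formula line in the Euler solution above comes from setting T_b = b(b+1)/2 equal to target / T_a, i.e. b^2 + b - 2 * target / T_a = 0, whose positive root is b = (-1 + sqrt(1 + 8 * target / T_a)) / 2. A tiny numeric check of that estimate:

# Numeric sanity check of the b-estimate used in the Euler solution above.
from math import sqrt

def triangle(n: int) -> int:
    return n * (n + 1) // 2

target, t_a = 2_000_000, triangle(3)           # T_3 = 6
b_est = (-1 + sqrt(1 + 8 * target / t_a)) / 2
print(b_est)                                    # ~816.0
print(triangle(815) * t_a, triangle(816) * t_a) # 1_995_120 and 2_000_016 bracket the target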
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ["model.decoder.embed_positions.weights"] def __magic_name__ ( A ) -> int: if "emb" in name: snake_case = name.replace('emb' , 'model.decoder.embed_tokens' ) if "transformer" in name: snake_case = name.replace('transformer' , 'model.decoder' ) if "cross_attention" in name: snake_case = name.replace('cross_attention' , 'encoder_attn' ) if "linear1" in name: snake_case = name.replace('linear1' , 'fc1' ) if "linear2" in name: snake_case = name.replace('linear2' , 'fc2' ) if "norm1" in name: snake_case = name.replace('norm1' , 'self_attn_layer_norm' ) if "norm_cross" in name: snake_case = name.replace('norm_cross' , 'encoder_attn_layer_norm' ) if "norm2" in name: snake_case = name.replace('norm2' , 'final_layer_norm' ) if "out_norm" in name: snake_case = name.replace('out_norm' , 'model.decoder.layer_norm' ) if "linears" in name: snake_case = name.replace('linears' , 'lm_heads' ) if "condition_provider.conditioners.description.output_proj" in name: snake_case = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' ) return name def __magic_name__ ( A , A ) -> Tuple[Dict, Dict]: snake_case = list(state_dict.keys() ) snake_case = {} for key in keys: snake_case = state_dict.pop(A ) snake_case = rename_keys(A ) if "in_proj_weight" in key: # split fused qkv proj snake_case = val[:hidden_size, :] snake_case = val[hidden_size : 2 * hidden_size, :] snake_case = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: snake_case = val else: snake_case = val return state_dict, enc_dec_proj_state_dict def __magic_name__ ( A ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values snake_case = 1_0_2_4 snake_case = 2_4 snake_case = 1_6 elif checkpoint == "medium": snake_case = 1_5_3_6 snake_case = 4_8 snake_case = 2_4 elif checkpoint == "large": snake_case = 2_0_4_8 snake_case = 4_8 snake_case = 3_2 else: raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) snake_case = MusicgenDecoderConfig( hidden_size=A , ffn_dim=hidden_size * 4 , num_hidden_layers=A , num_attention_heads=A , ) return config @torch.no_grad() def __magic_name__ ( A , A=None , A=None , A="cpu" ) -> Any: snake_case = MusicGen.get_pretrained(A , device=A ) snake_case = decoder_config_from_checkpoint(A ) snake_case = fairseq_model.lm.state_dict() snake_case , snake_case = rename_state_dict( A , hidden_size=decoder_config.hidden_size ) snake_case = TaEncoderModel.from_pretrained('t5-base' ) snake_case = EncodecModel.from_pretrained('facebook/encodec_32khz' ) snake_case = MusicgenForCausalLM(A ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection snake_case , snake_case = decoder.load_state_dict(A , strict=A ) for key in missing_keys.copy(): if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(A ) if len(A ) > 0: raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' ) if len(A ) > 0: 
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model snake_case = MusicgenForConditionalGeneration(text_encoder=A , audio_encoder=A , decoder=A ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(A ) # check we can do a forward pass snake_case = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) snake_case = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): snake_case = model(input_ids=A , decoder_input_ids=A ).logits if logits.shape != (8, 1, 2_0_4_8): raise ValueError('Incorrect shape for logits' ) # now construct the processor snake_case = AutoTokenizer.from_pretrained('t5-base' ) snake_case = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' ) snake_case = MusicgenProcessor(feature_extractor=A , tokenizer=A ) # set the appropriate bos/pad token ids snake_case = 2_0_4_8 snake_case = 2_0_4_8 # set other default generation config params snake_case = int(3_0 * audio_encoder.config.frame_rate ) snake_case = True snake_case = 3.0 if pytorch_dump_folder is not None: Path(A ).mkdir(exist_ok=A ) logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(A ) processor.save_pretrained(A ) if repo_id: logger.info(F'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(A ) processor.push_to_hub(A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
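The fused attention projection handling above is the crux of the rename step: one (3 * hidden, hidden) matrix is split into separate query/key/value projections. A minimal, self-contained sketch of the same split; the 4-dim hidden size and the synthetic weight are illustrative, not MusicGen's real sizes.

import torch

hidden_size = 4
# fused projection stored as a single (3 * hidden, hidden) matrix, as in `in_proj_weight`
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

q_proj = in_proj_weight[:hidden_size, :]                   # first third  -> query
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]  # middle third -> key
v_proj = in_proj_weight[-hidden_size:, :]                  # last third   -> value

# concatenating the three slices recovers the fused matrix exactly
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), in_proj_weight)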
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xmod''' def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, lowercase_=False, lowercase_=2, lowercase_=False, lowercase_=True, lowercase_=True, lowercase_=("en_XX",), lowercase_=None, **lowercase_, ) -> List[Any]: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout snake_case = pre_norm snake_case = adapter_reduction_factor snake_case = adapter_layer_norm snake_case = adapter_reuse_layer_norm snake_case = ln_before_adapter snake_case = list(lowercase_ ) snake_case = default_language class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
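The load-or-build logic above follows a common lock-guarded cache pattern. A stripped-down sketch of just that pattern; the cache path and the stand-in payload are hypothetical, not taken from the file.

import os
import time

import torch
from filelock import FileLock

cached_features_file = "/tmp/cached_features.bin"  # hypothetical cache location

with FileLock(cached_features_file + ".lock"):
    if os.path.exists(cached_features_file):
        start = time.time()
        payload = torch.load(cached_features_file)
        print(f"loaded cache [took {time.time() - start:.3f} s]")
    else:
        payload = {"features": list(range(10))}  # stand-in for the expensive conversion
        torch.save(payload, cached_features_file)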
332
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A ) -> List[int]: if isinstance(A , np.ndarray ): return list(tensor.shape ) snake_case = tf.shape(A ) if tensor.shape == tf.TensorShape(A ): return dynamic snake_case = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(A )] def __magic_name__ ( A , A = None , A = None ) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1E-9 , axis=A , name=A ) def __magic_name__ ( A , A , A , A=1E-5 , A=-1 ) -> Any: # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(A , A ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized snake_case , snake_case = tf.nn.moments(A , axes=[axis] , keepdims=A ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis snake_case = [1] * inputs.shape.rank snake_case = shape_list(A )[axis] snake_case = tf.reshape(A , A ) snake_case = tf.reshape(A , A ) # Compute layer normalization using the batch_normalization # function. snake_case = tf.nn.batch_normalization( A , A , A , offset=A , scale=A , variance_epsilon=A , ) return outputs def __magic_name__ ( A , A=0 , A=-1 ) -> Tuple: # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input snake_case = tf.shape(A ) snake_case = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) snake_case = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(A , A ) def __magic_name__ ( A ) -> tf.Tensor: if not isinstance(A , tf.Tensor ): snake_case = tf.convert_to_tensor(A ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: snake_case = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: snake_case = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) snake_case = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __magic_name__ ( A , A , A = "input_ids" ) -> None: tf.debugging.assert_less( A , tf.cast(A , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(A )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def __magic_name__ ( A , A , A ) -> Dict: snake_case = 6_4_5_1_2 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. 
snake_case = [x for x in data if len(A ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) snake_case = np.asarray(A ) snake_case = 1 snake_case = np.array_split(A , A ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 snake_case = np.array_split(A , A ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(A ): snake_case = chunk_data else: snake_case = data def __magic_name__ ( A , A ) -> Tuple: if name in group.attrs: snake_case = [n.decode('utf8' ) if hasattr(A , 'decode' ) else n for n in group.attrs[name]] else: snake_case = [] snake_case = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(A , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def __magic_name__ ( A ) -> Union[str, Any]: def _expand_single_ad_tensor(A ): if isinstance(A , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(A , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , A )
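The `while` loop above is the whole trick for oversized HDF5 attributes: keep bisecting the array until every chunk fits under the header limit. A numpy-only sketch with synthetic layer names; the 64512-byte limit is the one hardcoded above, the data is made up.

import numpy as np

HDF5_OBJECT_HEADER_LIMIT = 64512
# 20000 names at 32 bytes each = 640000 bytes, well over the limit
data = np.asarray(["layer_%d" % i for i in range(20000)], dtype="S32")

num_chunks = 1
chunked_data = np.array_split(data, num_chunks)
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
    num_chunks += 1
    chunked_data = np.array_split(data, num_chunks)

print(num_chunks, max(x.nbytes for x in chunked_data))  # 10 chunks of 64000 bytes each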
332
'''simple docstring'''
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
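For reference, a typical invocation of the converter above; the script filename and all paths are placeholders.

#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf_ckpt/bert_model.ckpt \
#       --bert_config_file ./tf_ckpt/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin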
332
1
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> Any: snake_case = 1 snake_case = 3 snake_case = (32, 32) snake_case = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(lowercase_ ) return image @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, ) return model @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, ) return model @property def _lowerCamelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> int: def extract(*lowercase_, **lowercase_ ): class lowerCamelCase : def __init__( self ) -> int: snake_case = torch.ones([0] ) def _lowerCamelCase ( self, lowercase_ ) -> Tuple: self.pixel_values.to(lowercase_ ) return self return Out() return extract def _lowerCamelCase ( self ) -> List[Any]: snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.dummy_cond_unet snake_case = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowercase_, set_alpha_to_one=lowercase_, ) snake_case = self.dummy_vae snake_case = self.dummy_text_encoder snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk snake_case = StableDiffusionPipeline( unet=lowercase_, scheduler=lowercase_, vae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, safety_checker=lowercase_, feature_extractor=self.dummy_extractor, ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'A painting of a squirrel eating a burger' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = sd_pipe([prompt], generator=lowercase_, guidance_scale=6.0, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=lowercase_, )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] 
assert image.shape == (1, 64, 64, 3) snake_case = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Dict: snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.dummy_cond_unet snake_case = PNDMScheduler(skip_prk_steps=lowercase_ ) snake_case = self.dummy_vae snake_case = self.dummy_text_encoder snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk snake_case = StableDiffusionPipeline( unet=lowercase_, scheduler=lowercase_, vae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, safety_checker=lowercase_, feature_extractor=self.dummy_extractor, ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'A painting of a squirrel eating a burger' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = sd_pipe([prompt], generator=lowercase_, guidance_scale=6.0, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=6.0, num_inference_steps=2, output_type='np', return_dict=lowercase_, )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=lowercase_ ) assert isinstance(lowercase_, lowercase_ ) assert isinstance(pipe.scheduler, lowercase_ ) assert pipe.safety_checker is None snake_case = pipe('example prompt', num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) snake_case = StableDiffusionPipeline.from_pretrained(lowercase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None snake_case = pipe('example prompt', num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU' ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.dummy_cond_unet snake_case = PNDMScheduler(skip_prk_steps=lowercase_ ) snake_case = self.dummy_vae snake_case = self.dummy_text_encoder snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 snake_case = unet.half() snake_case = vae.half() snake_case = bert.half() # make sure here that pndm scheduler skips prk snake_case = StableDiffusionPipeline( unet=lowercase_, scheduler=lowercase_, vae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, safety_checker=lowercase_, feature_extractor=self.dummy_extractor, ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'A painting of a squirrel eating a burger' snake_case = sd_pipe([prompt], 
num_inference_steps=2, output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=lowercase_ ) snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) snake_case = 4003660346 snake_case = 7 # without safety guidance (sld_guidance_scale = 0) snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> int: snake_case = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=lowercase_ ) snake_case = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'padme amidala taking a bath artwork, safe for work, no nudity' snake_case = 2734971755 snake_case = 7 snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 
512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[int]: snake_case = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) snake_case = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) snake_case = 1044355234 snake_case = 12 snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=0, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 snake_case = torch.manual_seed(lowercase_ ) snake_case = sd_pipe( [prompt], generator=lowercase_, guidance_scale=lowercase_, num_inference_steps=50, output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
332
'''simple docstring'''
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    # one bucket per integer offset from the minimum value
    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
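A quick stdlib-only randomized check of the sort above; the value range and sample sizes are arbitrary.

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
    assert bucket_sort(data) == sorted(data)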
332
1
'''simple docstring'''


def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us a longer right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # on the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
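A worked example for the functions above: each z-value is the length of the longest common prefix between the string and its suffix starting at that index.

print(z_function("abracadabra"))            # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
print(find_pattern("abra", "abracadabra"))  # 2 occurrences, at indices 0 and 7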
332
'''simple docstring'''


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root lies between a and b only if
    # equation(a) and equation(b) differ in sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
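Because the bracket is halved every pass, the 0.01 stopping tolerance bounds the work. A quick sketch of the iteration count for the second call above:

import math

# halvings needed to shrink the initial bracket (0, 6) below the 0.01 tolerance
print(math.ceil(math.log2(6 / 0.01)))  # 10
print(math.sqrt(10))  # ~3.1623, the positive root of 10 - x*x that both calls approximate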
332
1
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCAmelCase_ = get_logger(__name__) def __magic_name__ ( A , A , A , A , A=0 ) -> Tuple: os.makedirs(A , exist_ok=A ) with FSDP.state_dict_type( A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case = os.path.join(A , A ) if accelerator.process_index == 0: logger.info(F'''Saving model to {output_model_file}''' ) torch.save(A , A ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case = os.path.join(A , A ) logger.info(F'''Saving model to {output_model_file}''' ) torch.save(A , A ) logger.info(F'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case = os.path.join(A , F'''{MODEL_NAME}_{model_index}''' ) os.makedirs(A , exist_ok=A ) logger.info(F'''Saving model to {ckpt_dir}''' ) snake_case = {'model': state_dict} dist_cp.save_state_dict( state_dict=A , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , ) logger.info(F'''Model saved to {ckpt_dir}''' ) def __magic_name__ ( A , A , A , A , A=0 ) -> str: accelerator.wait_for_everyone() with FSDP.state_dict_type( A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(A ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin''' snake_case = os.path.join(A , A ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case = torch.load(A ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case = ( F'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case = os.path.join(A , A ) logger.info(F'''Loading model from {input_model_file}''' ) snake_case = torch.load(A ) logger.info(F'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case = ( os.path.join(A , F'''{MODEL_NAME}_{model_index}''' ) if F'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading 
model from {ckpt_dir}''' ) snake_case = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=A , storage_reader=dist_cp.FileSystemReader(A ) , planner=DefaultLoadPlanner() , ) snake_case = state_dict['model'] logger.info(F'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(A ) def __magic_name__ ( A , A , A , A , A , A=0 ) -> Optional[Any]: os.makedirs(A , exist_ok=A ) with FSDP.state_dict_type( A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case = FSDP.optim_state_dict(A , A ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case = os.path.join(A , A ) logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(A , A ) logger.info(F'''Optimizer state saved in {output_optimizer_file}''' ) else: snake_case = os.path.join(A , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(A , exist_ok=A ) logger.info(F'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , ) logger.info(F'''Optimizer state saved in {ckpt_dir}''' ) def __magic_name__ ( A , A , A , A , A , A=0 ) -> int: accelerator.wait_for_everyone() with FSDP.state_dict_type( A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case = ( F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case = os.path.join(A , A ) logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' ) snake_case = torch.load(A ) logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' ) else: snake_case = ( os.path.join(A , F'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if F'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(F'''Loading Optimizer from {ckpt_dir}''' ) snake_case = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(A ) , ) snake_case = optim_state['optimizer'] logger.info(F'''Optimizer loaded from {ckpt_dir}''' ) snake_case = FSDP.optim_state_dict_to_load(A , A , A ) optimizer.load_state_dict(A )
332
'''simple docstring'''
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
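A hedged example of consuming the last fixture above in a test; the assertions rely only on what the fixture itself writes to disk.

import os


def test_dataset_loading_script_dir(dataset_loading_script_dir):
    # the fixture returns the path of the generated loading script as a string
    assert os.path.isfile(dataset_loading_script_dir)
    assert dataset_loading_script_dir.endswith("__dummy_dataset1__.py")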
332
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
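For contrast with the multiprocessing version above, a sequential odd-even transposition using the same swap schedule; n passes suffice because the algorithm is a sorting network of depth n.

def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for pass_num in range(n):
        # even passes compare (0,1), (2,3), ...; odd passes compare (1,2), (3,4), ...
        for i in range(pass_num % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))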
332
1
'''simple docstring'''
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
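A hedged usage sketch for the pipeline above, wired with deliberately tiny diffusers components; the sizes are placeholders, not any released checkpoint's configuration.

from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = CustomLocalPipeline(unet=unet, scheduler=DDIMScheduler())
output, message = pipe(batch_size=1, num_inference_steps=2)
print(message)  # "This is a local test"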
332
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
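A stdlib cross-check on the output volume (the backtracking above prints one line per permutation):

from itertools import permutations

assert sum(1 for _ in permutations([3, 1, 2, 4])) == 24  # 4! lines printed for the first call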
332
1
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) lowerCAmelCase_ = getLogger(__name__) def __magic_name__ ( A , A , A , A = 8 , A = 1_0_2_4 , A="val" , A=None , A=False , A="summarization" , A=None , A=1 , A = None , A="" , **A , ) -> Dict: snake_case = str(A ) assert local_rank is not None torch.distributed.init_process_group(backend='nccl' , rank=A ) snake_case = Path(A ) snake_case = save_dir.joinpath(F'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(A ) snake_case = AutoModelForSeqaSeqLM.from_pretrained(A ).cuda() if fpaa: snake_case = model.half() # determine if we need to increase num_beams use_task_specific_params(A , A ) # update config with task specific params snake_case = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: snake_case = num_return_sequences snake_case = AutoTokenizer.from_pretrained(A ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: snake_case = tokenizer.model_max_length if prefix is None: snake_case = prefix or getattr(model.config , 'prefix' , '' ) or '' snake_case = SeqaSeqDataset( A , A , A , max_target_length=1_0_2_4 , type_path=A , n_obs=A , prefix=A , **A , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. snake_case = ds.make_sortish_sampler(A , distributed=A , add_extra_examples=A , shuffle=A ) snake_case = DataLoader(A , sampler=A , batch_size=A , collate_fn=ds.collate_fn ) snake_case = [] for batch in tqdm(A ): snake_case = model.generate( input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=A , num_beams=A , **A , ) snake_case = tokenizer.batch_decode(A , skip_special_tokens=A , clean_up_tokenization_spaces=A ) snake_case = batch['ids'] if num_return_sequences > 1: snake_case = chunks(A , A ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(A ): results.append({'pred': pred, 'id': ids[i].item()} ) save_json(A , A ) return results, sampler.num_replicas def __magic_name__ ( ) -> Any: snake_case = argparse.ArgumentParser( epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' ) parser.add_argument('--data_dir' , type=A , help='like cnn_dm/test.source' ) parser.add_argument( '--model_name' , type=A , help='like facebook/bart-large-cnn,t5-base, etc.' 
, default='sshleifer/distilbart-xsum-12-3' , ) parser.add_argument('--save_dir' , type=A , help='where to save' , default='tmp_gen' ) parser.add_argument('--max_source_length' , type=A , default=A ) parser.add_argument( '--type_path' , type=A , default='test' , help='which subset to evaluate typically train/val/test' ) parser.add_argument('--task' , type=A , default='summarization' , help='used for task_specific_params + metrics' ) parser.add_argument('--bs' , type=A , default=8 , required=A , help='batch size' ) parser.add_argument( '--local_rank' , type=A , default=-1 , required=A , help='should be passed by distributed.launch' ) parser.add_argument( '--n_obs' , type=A , default=A , required=A , help='How many observations. Defaults to all.' ) parser.add_argument( '--num_return_sequences' , type=A , default=1 , required=A , help='How many sequences to return' ) parser.add_argument( '--sync_timeout' , type=A , default=6_0_0 , required=A , help='How long should master process wait for other processes to finish.' , ) parser.add_argument('--src_lang' , type=A , default=A , required=A ) parser.add_argument('--tgt_lang' , type=A , default=A , required=A ) parser.add_argument( '--prefix' , type=A , required=A , default=A , help='will be added to the begininng of src examples' ) parser.add_argument('--fp16' , action='store_true' ) parser.add_argument('--debug' , action='store_true' ) snake_case = time.time() snake_case , snake_case = parser.parse_known_args() snake_case = parse_numeric_n_bool_cl_kwargs(A ) if generate_kwargs and args.local_rank <= 0: print(F'''parsed the following generate kwargs: {generate_kwargs}''' ) snake_case = Path(args.save_dir + '_tmp' ) Path(A ).mkdir(exist_ok=A ) # this handles locking. snake_case = list(json_save_dir.glob('rank_*.json' ) ) if intermediate_files: raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
snake_case = {} if args.src_lang is not None: snake_case = args.src_lang if args.tgt_lang is not None: snake_case = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=A ) snake_case , snake_case = eval_data_dir( args.data_dir , A , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=A , **A , ) if args.local_rank <= 0: snake_case = Path(args.save_dir ) save_dir.mkdir(exist_ok=A ) snake_case = gather_results_from_each_node(A , A , args.sync_timeout ) snake_case = combine_partial_results(A ) if args.num_return_sequences > 1: snake_case = save_dir.joinpath('pseudolabel_results.json' ) print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(A , A ) return snake_case = Path(args.data_dir ).joinpath(args.type_path + '.target' ) with open(A ) as f: snake_case = [x.rstrip() for x in f.readlines()][: len(A )] # Calculate metrics, save metrics, and save _generations.txt snake_case = 'translation' in args.task snake_case = calculate_bleu if calc_bleu else calculate_rouge snake_case = 'bleu' if calc_bleu else 'rouge' snake_case = score_fn(A , A ) snake_case = len(A ) snake_case = time.time() - start_time snake_case = round(runtime / metrics['n_obs'] , 4 ) snake_case = num_replicas # TODO(@stas00): add whatever metadata to metrics snake_case = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' ) save_json(A , A , indent=A ) print(A ) write_txt_file(A , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(A , save_dir.joinpath(F'''{args.type_path}.target''' ) ) else: shutil.rmtree(A ) def __magic_name__ ( A ) -> List: snake_case = [] for partial_result in partial_results: records.extend(A ) snake_case = sorted(A , key=lambda A : x["id"] ) snake_case = [x['pred'] for x in records] return preds def __magic_name__ ( A , A , A ) -> List[Dict[str, List]]: # WAIT FOR lots of .json files snake_case = time.time() logger.info('waiting for all nodes to finish' ) snake_case = None while (time.time() - start_wait) < timeout: snake_case = list(save_dir.glob('rank_*.json' ) ) if len(A ) < num_replicas: continue try: # make sure all json files are fully saved snake_case = lmap(A , A ) return json_data except JSONDecodeError: continue else: raise TimeoutError('Rank 0 gave up on waiting for other processes' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
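The script above is designed to be launched once per GPU; `--local_rank` is supplied by the launcher. A typical invocation under the standard torch launcher; the model name, data path, and process count below are placeholders, not values taken from this file.

#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 16 --fp16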
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
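For the non-multiple-choice branch above, the property resolves to batch/sequence axes only. A hedged check, assuming the classes map to the public transformers names `RobertaConfig` and `RobertaOnnxConfig` as below:

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])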
332
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
    snake_case_ = '''maskformer-swin'''

    snake_case_ = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self, lowercase_=224, lowercase_=4, lowercase_=3, lowercase_=96, lowercase_=[2, 2, 6, 2], lowercase_=[3, 6, 12, 24], lowercase_=7, lowercase_=4.0, lowercase_=True, lowercase_=0.0, lowercase_=0.0, lowercase_=0.1, lowercase_="gelu", lowercase_=False, lowercase_=0.02, lowercase_=1E-5, lowercase_=None, lowercase_=None, **lowercase_, ) -> Any:
        super().__init__(**lowercase_ )
        snake_case = image_size
        snake_case = patch_size
        snake_case = num_channels
        snake_case = embed_dim
        snake_case = depths
        snake_case = len(lowercase_ )
        snake_case = num_heads
        snake_case = window_size
        snake_case = mlp_ratio
        snake_case = qkv_bias
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = drop_path_rate
        snake_case = hidden_act
        snake_case = use_absolute_embeddings
        snake_case = layer_norm_eps
        snake_case = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        snake_case = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
        snake_case = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(lowercase_ ) + 1 )]
        snake_case , snake_case = get_aligned_output_features_output_indices(
            out_features=lowercase_, out_indices=lowercase_, stage_names=self.stage_names )
332
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

lowerCAmelCase_ = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

lowerCAmelCase_ = {
    "allenai/led-base-16384": 1_6_3_8_4,
}


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = LEDTokenizer
    snake_case_ = ['''input_ids''', '''attention_mask''']

    def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
        super().__init__(
            lowercase_,
            lowercase_,
            tokenizer_file=lowercase_,
            errors=lowercase_,
            bos_token=lowercase_,
            eos_token=lowercase_,
            sep_token=lowercase_,
            cls_token=lowercase_,
            unk_token=lowercase_,
            pad_token=lowercase_,
            mask_token=lowercase_,
            add_prefix_space=lowercase_,
            trim_offsets=lowercase_,
            **lowercase_,
        )
        snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
            snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
            snake_case = add_prefix_space
            snake_case = pre_tok_class(**lowercase_ )
        snake_case = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case = 'post_processor'
        snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
        if tokenizer_component_instance:
            snake_case = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case = tuple(state['sep'] )
            if "cls" in state:
                snake_case = tuple(state['cls'] )

            snake_case = False
            if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
                snake_case = add_prefix_space
                snake_case = True
            if state.get('trim_offsets', lowercase_ ) != trim_offsets:
                snake_case = trim_offsets
                snake_case = True
            if changes_to_apply:
                snake_case = getattr(lowercase_, state.pop('type' ) )
                snake_case = component_class(**lowercase_ )
                setattr(self.backend_tokenizer, lowercase_, lowercase_ )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def _lowerCamelCase ( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def _lowerCamelCase ( self, lowercase_ ) -> Any:
        snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
        snake_case = value

    def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_ )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )

        return super()._batch_encode_plus(*lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_ )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )

        return super()._encode_plus(*lowercase_, **lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
        snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
        return tuple(lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
        snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
        snake_case = super()._pad(
            encoded_inputs=lowercase_,
            max_length=lowercase_,
            padding_strategy=lowercase_,
            pad_to_multiple_of=lowercase_,
            return_attention_mask=lowercase_,
        )

        # Load from model defaults
        if return_attention_mask is None:
            snake_case = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )

            if needs_to_be_padded:
                snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    snake_case = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    snake_case = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )

        return encoded_inputs
332
1
'''simple docstring'''
import argparse


lowerCAmelCase_ = "docs/source/_static/js/custom.js"


def __magic_name__ ( A ) -> Optional[int]:
    with open(A , encoding='utf-8' , newline='\n' ) as f:
        snake_case = f.readlines()
    snake_case = 0

    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    snake_case = F'''const stableVersion = "v{version}"\n'''

    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1

    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1

    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''

    with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(A )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    lowerCAmelCase_ = parser.parse_args()
    update_custom_js(args.version)
332
'''simple docstring'''
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def __magic_name__ ( A ) -> Tuple:
    snake_case = []
    embed.append((
        F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
        F'''stage{idx}.patch_embed.proj.weight''',
    ))
    embed.append((
        F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
        F'''stage{idx}.patch_embed.proj.bias''',
    ))
    embed.append((
        F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
        F'''stage{idx}.patch_embed.norm.weight''',
    ))
    embed.append((
        F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
        F'''stage{idx}.patch_embed.norm.bias''',
    ))
    return embed


def __magic_name__ ( A , A ) -> Optional[int]:
    snake_case = []
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
        F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
        F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
    ))
    attention_weights.append((
        F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
        F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
    ))
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''')
    )
    attention_weights.append(
        (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''')
    )
    return attention_weights


def __magic_name__ ( A ) -> List[Any]:
    snake_case = []
    token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
    return token


def __magic_name__ ( ) -> Dict:
    snake_case = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head


def __magic_name__ ( A , A , A , A ) -> int:
    snake_case = 'imagenet-1k-id2label.json'
    snake_case = 1_0_0_0

    snake_case = 'huggingface/label-files'
    snake_case = num_labels
    snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
    snake_case = {int(A ): v for k, v in idalabel.items()}

    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}

    snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        snake_case = [1, 2, 1_0]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        snake_case = [1, 4, 1_6]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        snake_case = [2, 2, 2_0]
        snake_case = [3, 1_2, 1_6]
        snake_case = [1_9_2, 7_6_8, 1_0_2_4]

    snake_case = CvtForImageClassification(A )
    snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    snake_case = image_size
    snake_case = torch.load(A , map_location=torch.device('cpu' ) )

    snake_case = OrderedDict()
    snake_case = []

    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            snake_case = list_of_state_dict + cls_token(A )
        snake_case = list_of_state_dict + embeddings(A )
        for cnt in range(config.depth[idx] ):
            snake_case = list_of_state_dict + attention(A , A )

    snake_case = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(A )
    for i in range(len(A ) ):
        snake_case = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(A )
    model.save_pretrained(A )
    image_processor.save_pretrained(A )


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=3_8_4,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    lowerCAmelCase_ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring'''
def __magic_name__ ( A , A ) -> int:
    while second != 0:
        snake_case = first & second
        first ^= second
        snake_case = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase_ = int(input("Enter the first number: ").strip())
    lowerCAmelCase_ = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
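# --- Added illustration (not part of the original sample): a deobfuscated,
# --- self-contained sketch of the carry-propagation addition above. The name
# --- `bitwise_add` is hypothetical; the loop body mirrors the routine it follows.
def bitwise_add(first: int, second: int) -> int:
    # For non-negative integers: `&` isolates the carry bits, `^` adds without
    # carrying, and the carry is shifted one position left and re-added.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


assert bitwise_add(5, 7) == 12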
332
'''simple docstring'''
from pathlib import Path

import fire


def __magic_name__ ( A , A , A ) -> Union[str, Any]:
    snake_case = Path(A )
    snake_case = Path(A )
    dest_dir.mkdir(exist_ok=A )
    for path in src_dir.iterdir():
        snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
        snake_case = dest_dir.joinpath(path.name )
        print(A )
        dest_path.open('w' ).write('\n'.join(A ) )


if __name__ == "__main__":
    fire.Fire(minify)
332
1
'''simple docstring'''
def __magic_name__ ( A , A = 0 ) -> list:
    snake_case = length or len(A )
    snake_case = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            snake_case , snake_case = list_data[i + 1], list_data[i]
            snake_case = True

    return list_data if not swapped else bubble_sort(A , length - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
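# --- Added illustration (not part of the original sample): a readable alias of
# --- the recursive bubble sort above, showing the early exit on a swap-free
# --- pass. The name `bubble_sort` is hypothetical.
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # adjacent elements out of order: swap and remember that we did
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # a full pass without swaps means the list is sorted; otherwise recurse on a
    # shorter prefix, since the largest element has bubbled to the end
    return list_data if not swapped else bubble_sort(list_data, length - 1)


assert bubble_sort([3, 1, 2]) == [1, 2, 3]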
332
'''simple docstring'''
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


lowerCAmelCase_ = pytest.mark.integration


@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
    inspect_dataset(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
    inspect_metric(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )


@pytest.mark.parametrize(
    'path, config_name, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_config_info(A , config_name=A )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] ,
)
def __magic_name__ ( A , A , A ) -> Any:
    with pytest.raises(A ):
        get_dataset_config_info(A , config_name=A )


@pytest.mark.parametrize(
    'path, expected' ,
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] ,
)
def __magic_name__ ( A , A ) -> Dict:
    snake_case = get_dataset_config_names(A )
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' ,
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_infos(A )
    assert list(infos.keys() ) == expected_configs
    snake_case = expected_configs[0]
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] ,
)
def __magic_name__ ( A , A , A ) -> Any:
    snake_case = get_dataset_infos(A )
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] ,
)
def __magic_name__ ( A , A , A ) -> int:
    with pytest.raises(A ):
        get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring'''
from typing import List

from .keymap import KEYMAP, get_character


def __magic_name__ ( A ) -> str:
    def decorator(A ):
        snake_case = getattr(A , 'handle_key' , [] )
        handle += [key]
        setattr(A , 'handle_key' , A )
        return func

    return decorator


def __magic_name__ ( *A ) -> Any:
    def decorator(A ):
        snake_case = getattr(A , 'handle_key' , [] )
        handle += keys
        setattr(A , 'handle_key' , A )
        return func

    return decorator


class lowerCamelCase ( __lowerCAmelCase ):
    def __new__( cls, lowercase_, lowercase_, lowercase_ ) -> Dict:
        snake_case = super().__new__(cls, lowercase_, lowercase_, lowercase_ )
        if not hasattr(lowercase_, 'key_handler' ):
            setattr(lowercase_, 'key_handler', {} )
        setattr(lowercase_, 'handle_input', KeyHandler.handle_input )

        for value in attrs.values():
            snake_case = getattr(lowercase_, 'handle_key', [] )
            for key in handled_keys:
                snake_case = value
        return new_cls

    @staticmethod
    def _lowerCamelCase ( cls ) -> Union[str, Any]:
        snake_case = get_character()
        if char != KEYMAP["undefined"]:
            snake_case = ord(lowercase_ )
        snake_case = cls.key_handler.get(lowercase_ )
        if handler:
            snake_case = char
            return handler(cls )
        else:
            return None


def __magic_name__ ( cls ) -> List[str]:
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
332
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase_ = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring'''
from __future__ import annotations

import time

import numpy as np


lowerCAmelCase_ = [8, 5, 9, 7]
lowerCAmelCase_ = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
lowerCAmelCase_ = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class lowerCamelCase :
    def __init__( self, lowercase_, lowercase_, lowercase_, ) -> None:
        snake_case = claim_vector
        snake_case = allocated_resources_table
        snake_case = maximum_claim_table

    def _lowerCamelCase ( self ) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def _lowerCamelCase ( self ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def _lowerCamelCase ( self ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(lowercase_ ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def _lowerCamelCase ( self ) -> dict[int, list[int]]:
        return {self.__need().index(lowercase_ ): i for i in self.__need()}

    def _lowerCamelCase ( self, **lowercase_ ) -> None:
        snake_case = self.__need()
        snake_case = self.__allocated_resources_table
        snake_case = self.__available_resources()
        snake_case = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n' )
        while need_list:
            snake_case = False
            for each_need in need_list:
                snake_case = True
                for index, need in enumerate(lowercase_ ):
                    if need > available_resources[index]:
                        snake_case = False
                        break
                if execution:
                    snake_case = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            snake_case = original_need_index
                    print(F'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(lowercase_ )
                    # update available/freed resources stack
                    snake_case = np.array(lowercase_ ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(lowercase_ ) for x in available_resources] ) )
                    break
            if safe:
                print('The process is in a safe state.\n' )
            else:
                print('System in unsafe state. Aborting...\n' )
                break

    def _lowerCamelCase ( self ) -> str:
        print(' ' * 9 + 'Allocated Resource Table' )
        for item in self.__allocated_resources_table:
            print(
                F'''P{self.__allocated_resources_table.index(lowercase_ ) + 1}'''
                + ' '.join(F'''{it:>8}''' for it in item )
                + '\n' )
        print(' ' * 9 + 'System Resource Table' )
        for item in self.__maximum_claim_table:
            print(
                F'''P{self.__maximum_claim_table.index(lowercase_ ) + 1}'''
                + ' '.join(F'''{it:>8}''' for it in item )
                + '\n' )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(lowercase_ ) for x in self.__claim_vector ) )
        print(
            'Initial Available Resources: '
            + ' '.join(str(lowercase_ ) for x in self.__available_resources() ) )
        time.sleep(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
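# --- Added illustration (not part of the original sample): a compact version of
# --- the Banker's-algorithm safety check the class above performs. `is_safe` is
# --- a hypothetical name; the data is the module's own claim/allocation/maximum
# --- tables, which the full class reports as a safe schedule.
import numpy as np


def is_safe(claim, alloc, maximum):
    need = [list(np.array(m) - np.array(a)) for m, a in zip(maximum, alloc)]
    available = list(np.array(claim) - np.array(alloc).sum(axis=0))
    pending = list(range(len(alloc)))
    while pending:
        # any process whose remaining need fits in the available vector can run
        runnable = [p for p in pending if all(n <= av for n, av in zip(need[p], available))]
        if not runnable:
            return False  # nobody can finish: unsafe state
        p = runnable[0]
        # the finished process returns its allocation to the pool
        available = [av + a for av, a in zip(available, alloc[p])]
        pending.remove(p)
    return True


assert is_safe(
    [8, 5, 9, 7],
    [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
    [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]],
)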
332
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


lowerCAmelCase_ = False


class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 12

    @property
    def _lowerCamelCase ( self ) -> Dict:
        return 12

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 32

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        torch.manual_seed(0 )
        snake_case = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def _lowerCamelCase ( self ) -> Tuple:
        torch.manual_seed(0 )
        snake_case = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(lowercase_ )

    @property
    def _lowerCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        snake_case = 12
        snake_case = 12
        snake_case = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        snake_case = TransformeraDModel(**lowercase_ )
        return model

    def _lowerCamelCase ( self ) -> Tuple:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )

        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_,
            text_encoder=lowercase_,
            tokenizer=lowercase_,
            transformer=lowercase_,
            scheduler=lowercase_,
            learned_classifier_free_sampling_embeddings=lowercase_,
        )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )

        snake_case = 'teddy bear playing in the pool'

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]

        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowerCamelCase ( self ) -> Optional[Any]:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(
            learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )

        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_,
            text_encoder=lowercase_,
            tokenizer=lowercase_,
            transformer=lowercase_,
            scheduler=lowercase_,
            learned_classifier_free_sampling_embeddings=lowercase_,
        )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )

        snake_case = 'teddy bear playing in the pool'

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images

        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]

        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self ) -> str:
        snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )

        snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        snake_case = pipeline.to(lowercase_ )
        pipeline.set_progress_bar_config(disable=lowercase_ )

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipeline(
            'teddy bear playing in the pool',
            num_images_per_prompt=1,
            generator=lowercase_,
            output_type='np',
        )

        snake_case = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''data2vec-vision'''

    def __init__( self, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.0, lowercase_=0.0, lowercase_=0.02, lowercase_=1E-12, lowercase_=224, lowercase_=16, lowercase_=3, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=0.1, lowercase_=0.1, lowercase_=True, lowercase_=[3, 5, 7, 11], lowercase_=[1, 2, 3, 6], lowercase_=True, lowercase_=0.4, lowercase_=256, lowercase_=1, lowercase_=False, lowercase_=255, **lowercase_, ) -> List[Any]:
        super().__init__(**lowercase_ )

        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = initializer_range
        snake_case = layer_norm_eps

        snake_case = image_size
        snake_case = patch_size
        snake_case = num_channels
        snake_case = use_mask_token
        snake_case = use_absolute_position_embeddings
        snake_case = use_relative_position_bias
        snake_case = use_shared_relative_position_bias
        snake_case = layer_scale_init_value
        snake_case = drop_path_rate
        snake_case = use_mean_pooling
        # decode head attributes (semantic segmentation)
        snake_case = out_indices
        snake_case = pool_scales
        # auxiliary head attributes (semantic segmentation)
        snake_case = use_auxiliary_head
        snake_case = auxiliary_loss_weight
        snake_case = auxiliary_channels
        snake_case = auxiliary_num_convs
        snake_case = auxiliary_concat_input
        snake_case = semantic_loss_ignore_index


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = version.parse('''1.11''' )

    @property
    def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def _lowerCamelCase ( self ) -> float:
        return 1E-4
332
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class lowerCamelCase ( metaclass=__lowerCAmelCase ):
    snake_case_ = ['''note_seq''']

    def __init__( self, *lowercase_, **lowercase_ ) -> str:
        requires_backends(self, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
        requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring'''
def __magic_name__ ( A = 1_0 , A = 1_0_0_0 , A = True ) -> int:
    assert (
        isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val


def __magic_name__ ( A , A ) -> int:
    return int((number_a + number_a) / 2 )


def __magic_name__ ( A , A , A ) -> None:
    assert (
        isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)' )

    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )

    def answer(A ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...' )

    snake_case = lower
    snake_case = higher
    snake_case = []

    while True:
        snake_case = get_avg(A , A )
        last_numbers.append(A )

        if answer(A ) == "low":
            snake_case = number
        elif answer(A ) == "high":
            snake_case = number
        else:
            break

    print(F'''guess the number : {last_numbers[-1]}''' )
    print(F'''details : {last_numbers!s}''' )


def __magic_name__ ( ) -> None:
    snake_case = int(input('Enter lower value : ' ).strip() )
    snake_case = int(input('Enter high value : ' ).strip() )
    snake_case = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(A , A , A )


if __name__ == "__main__":
    main()
332
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            lowercase_,
        )
        super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring'''
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
    snake_case_ = BarthezTokenizer
    snake_case_ = BarthezTokenizerFast
    snake_case_ = True
    snake_case_ = True

    def _lowerCamelCase ( self ) -> int:
        super().setUp()

        snake_case = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=lowercase_ )
        snake_case = tokenizer

    def _lowerCamelCase ( self ) -> Any:
        snake_case = '<pad>'
        snake_case = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ), lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ), lowercase_ )

    def _lowerCamelCase ( self ) -> Optional[int]:
        snake_case = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], '<s>' )
        self.assertEqual(vocab_keys[1], '<pad>' )
        self.assertEqual(vocab_keys[-1], '<mask>' )
        self.assertEqual(len(lowercase_ ), 101122 )

    def _lowerCamelCase ( self ) -> List[str]:
        self.assertEqual(self.get_tokenizer().vocab_size, 101122 )

    @require_torch
    def _lowerCamelCase ( self ) -> str:
        snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        snake_case = [0, 57, 3018, 70307, 91, 2]

        snake_case = self.tokenizer(
            lowercase_, max_length=len(lowercase_ ), padding=lowercase_, truncation=lowercase_, return_tensors='pt' )
        self.assertIsInstance(lowercase_, lowercase_ )

        self.assertEqual((2, 6), batch.input_ids.shape )
        self.assertEqual((2, 6), batch.attention_mask.shape )
        snake_case = batch.input_ids.tolist()[0]
        self.assertListEqual(lowercase_, lowercase_ )

    def _lowerCamelCase ( self ) -> Tuple:
        if not self.test_rust_tokenizer:
            return

        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer()

        snake_case = 'I was born in 92000, and this is falsé.'

        snake_case = tokenizer.tokenize(lowercase_ )
        snake_case = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_, lowercase_ )

        snake_case = tokenizer.encode(lowercase_, add_special_tokens=lowercase_ )
        snake_case = rust_tokenizer.encode(lowercase_, add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_, lowercase_ )

        snake_case = self.get_rust_tokenizer()
        snake_case = tokenizer.encode(lowercase_ )
        snake_case = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_, lowercase_ )

    @slow
    def _lowerCamelCase ( self ) -> str:
        # fmt: off
        snake_case = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        snake_case = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name='moussaKam/mbarthez',
            revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6',
            sequences=lowercase_,
        )
332
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values

lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)

lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)


def __magic_name__ ( ) -> Any:
    plt.scatter(A , A , color='red' )
    plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
332
1
'''simple docstring'''
def __magic_name__ ( A , A , A ) -> float:
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(A , A ):
        raise Exception('Years to repay must be an integer > 0' )

    # Yearly rate is divided by 12 to get monthly rate
    snake_case = rate_per_annum / 1_2

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    snake_case = years_to_repay * 1_2

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
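# --- Added illustration (not part of the original sample): the closed-form
# --- annuity formula the function above evaluates, P*r*(1+r)^n / ((1+r)^n - 1)
# --- with r the monthly rate and n the number of monthly payments, checked
# --- against a standard figure. `monthly_installment` is a hypothetical name.
def monthly_installment(principal: float, rate_per_annum: float, years: int) -> float:
    rate = rate_per_annum / 12
    n = years * 12
    return principal * rate * (1 + rate) ** n / ((1 + rate) ** n - 1)


# 100000 borrowed at 10% per annum over 10 years -> about 1321.51 per month
assert abs(monthly_installment(100_000, 0.10, 10) - 1321.51) < 0.01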
332
'''simple docstring'''
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = ''''''
    snake_case_ = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    snake_case_ = None  # compression type in fsspec. ex: "gzip"
    snake_case_ = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
        super().__init__(self, **lowercase_ )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case = fsspec.open(
            lowercase_,
            mode='rb',
            protocol=lowercase_,
            compression=self.compression,
            client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {} ),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        snake_case = os.path.basename(self.file.path.split('::' )[0] )
        snake_case = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        snake_case = None

    @classmethod
    def _lowerCamelCase ( cls, lowercase_ ) -> Any:
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(lowercase_ ).lstrip('/' )

    def _lowerCamelCase ( self ) -> Optional[Any]:
        if self.dir_cache is None:
            snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            snake_case = {f['name']: f}

    def _lowerCamelCase ( self, lowercase_ ) -> str:
        return self.file.open().read()

    def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
        snake_case = self._strip_protocol(lowercase_ )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''bz2'''
    snake_case_ = '''bz2'''
    snake_case_ = '''.bz2'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''gzip'''
    snake_case_ = '''gzip'''
    snake_case_ = '''.gz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''lz4'''
    snake_case_ = '''lz4'''
    snake_case_ = '''.lz4'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''xz'''
    snake_case_ = '''xz'''
    snake_case_ = '''.xz'''


class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''zstd'''
    snake_case_ = '''zstd'''
    snake_case_ = '''.zst'''

    def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
        super().__init__(
            fo=lowercase_,
            mode=lowercase_,
            target_protocol=lowercase_,
            target_options=lowercase_,
            block_size=lowercase_,
            **lowercase_,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case = self.file.__enter__

        class lowerCamelCase :
            def __init__( self, lowercase_ ) -> List[Any]:
                snake_case = file_

            def __enter__( self ) -> Dict:
                self._file.__enter__()
                return self

            def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
                self._file.__exit__(*lowercase_, **lowercase_ )

            def __iter__( self ) -> List[str]:
                return iter(self._file )

            def _lowerCamelCase ( self ) -> List[str]:
                return next(self._file )

            def __getattr__( self, lowercase_ ) -> List[Any]:
                return getattr(self._file, lowercase_ )

        def fixed_enter(*lowercase_, **lowercase_ ):
            return WrappedFile(_enter(*lowercase_, **lowercase_ ) )

        snake_case = fixed_enter
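# --- Added illustration (not part of the original sample): how a compression
# --- filesystem like the gzip one above is typically reached through fsspec's
# --- chained-URL syntax, reusing the example URL from the class's own comment.
# --- The remote file is hypothetical and needs network access, so the snippet
# --- is left commented out.
# import fsspec
#
# with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", mode="rt") as f:
#     text = f.read()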
332
1
'''simple docstring'''
import os


def __magic_name__ ( A = "input.txt" ) -> int:
    with open(os.path.join(os.path.dirname(A ) , A ) ) as input_file:
        snake_case = [
            [int(A ) for element in line.split(',' )] for line in input_file.readlines()
        ]

    snake_case = len(A )
    snake_case = len(matrix[0] )

    snake_case = [[-1 for _ in range(A )] for _ in range(A )]
    for i in range(A ):
        snake_case = matrix[i][0]

    for j in range(1 , A ):
        for i in range(A ):
            snake_case = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1 , A ):
            snake_case = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )

        for i in range(rows - 2 , -1 , -1 ):
            snake_case = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )


if __name__ == "__main__":
    print(f"{solution() = }")
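# --- Added illustration (not part of the original sample): the same
# --- column-by-column dynamic programme as above on a tiny matrix, with the
# --- down-pass and up-pass relaxations made explicit. `minimal_path_sum` is a
# --- hypothetical name.
def minimal_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cost of entering column 0 at each row
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # move right
        for i in range(1, rows):  # relax moving down within the column
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax moving up within the column
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)


# cheapest right/up/down path through [[1, 2], [4, 1]] is 1 -> 2, total 3
assert minimal_path_sum([[1, 2], [4, 1]]) == 3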
332
'''simple docstring'''
from __future__ import annotations


def __magic_name__ ( A , A , A ) -> int | float:
    if len(A ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(A )
        or left < -len(A )
        or right >= len(A )
        or right < -len(A )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    snake_case = (left + right) >> 1  # the middle
    snake_case = find_max(A , A , A )  # find max in range[left, mid]
    snake_case = find_max(A , mid + 1 , A )  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
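# --- Added illustration (not part of the original sample): the recursion above
# --- with readable names, halving the index range until a single element
# --- remains. `find_max` is a hypothetical alias for the same logic.
def find_max(nums, left: int, right: int):
    if left == right:
        return nums[left]  # a single element is its own maximum
    mid = (left + right) >> 1
    left_max = find_max(nums, left, mid)        # max of nums[left..mid]
    right_max = find_max(nums, mid + 1, right)  # max of nums[mid+1..right]
    return left_max if left_max >= right_max else right_max


assert find_max([3, 8, 1, 6], 0, 3) == 8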
332
1
'''simple docstring'''
lowerCAmelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase_ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
332
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = 42


class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
    @register_to_config
    def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
        super().__init__()

        # pass init params to Encoder
        snake_case = Encoder(
            in_channels=lowercase_,
            out_channels=lowercase_,
            down_block_types=lowercase_,
            block_out_channels=lowercase_,
            layers_per_block=lowercase_,
            act_fn=lowercase_,
            norm_num_groups=lowercase_,
            double_z=lowercase_,
        )

        snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels

        snake_case = nn.Convad(lowercase_, lowercase_, 1 )
        snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
        snake_case = nn.Convad(lowercase_, lowercase_, 1 )

        # pass init params to Decoder
        snake_case = Decoder(
            in_channels=lowercase_,
            out_channels=lowercase_,
            up_block_types=lowercase_,
            block_out_channels=lowercase_,
            layers_per_block=lowercase_,
            act_fn=lowercase_,
            norm_num_groups=lowercase_,
            norm_type=lowercase_,
        )

    @apply_forward_hook
    def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
        snake_case = self.encoder(lowercase_ )
        snake_case = self.quant_conv(lowercase_ )

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=lowercase_ )

    @apply_forward_hook
    def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            snake_case , snake_case , snake_case = self.quantize(lowercase_ )
        else:
            snake_case = h
        snake_case = self.post_quant_conv(lowercase_ )
        snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowercase_ )

    def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        snake_case = sample
        snake_case = self.encode(lowercase_ ).latents
        snake_case = self.decode(lowercase_ ).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaImgaImgPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
    snake_case_ = KandinskyVaaImgaImgPipeline
    snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    snake_case_ = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    snake_case_ = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    snake_case_ = False

    @property
    def _lowerCamelCase ( self ) -> Optional[Any]:
        return 32

    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 32

    @property
    def _lowerCamelCase ( self ) -> int:
        return self.time_input_dim

    @property
    def _lowerCamelCase ( self ) -> Tuple:
        return self.time_input_dim * 4

    @property
    def _lowerCamelCase ( self ) -> Tuple:
        return 100

    @property
    def _lowerCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        snake_case = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        snake_case = UNetaDConditionModel(**lowercase_ )
        return model

    @property
    def _lowerCamelCase ( self ) -> Tuple:
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def _lowerCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        snake_case = VQModel(**self.dummy_movq_kwargs )
        return model

    def _lowerCamelCase ( self ) -> str:
        snake_case = self.dummy_unet
        snake_case = self.dummy_movq

        snake_case = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00_085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        snake_case = DDIMScheduler(**lowercase_ )

        snake_case = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> str:
        snake_case = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowercase_ ) ).to(lowercase_ )
        snake_case = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            lowercase_ )
        # create init_image
        snake_case = floats_tensor((1, 3, 64, 64), rng=random.Random(lowercase_ ) ).to(lowercase_ )
        snake_case = image.cpu().permute(0, 2, 3, 1 )[0]
        snake_case = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((256, 256) )

        if str(lowercase_ ).startswith('mps' ):
            snake_case = torch.manual_seed(lowercase_ )
        else:
            snake_case = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
        snake_case = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def _lowerCamelCase ( self ) -> Tuple:
        snake_case = 'cpu'

        snake_case = self.get_dummy_components()

        snake_case = self.pipeline_class(**lowercase_ )
        snake_case = pipe.to(lowercase_ )

        pipe.set_progress_bar_config(disable=lowercase_ )

        snake_case = pipe(**self.get_dummy_inputs(lowercase_ ) )
        snake_case = output.images

        snake_case = pipe(
            **self.get_dummy_inputs(lowercase_ ),
            return_dict=lowercase_,
        )[0]

        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        snake_case = np.array(
            [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self ) -> int:
        snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )

        snake_case = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/cat.png' )
        snake_case = 'A red cartoon frog, 4k'

        snake_case = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
        pipe_prior.to(lowercase_ )

        snake_case = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.floataa )
        snake_case = pipeline.to(lowercase_ )

        pipeline.set_progress_bar_config(disable=lowercase_ )

        snake_case = torch.Generator(device='cpu' ).manual_seed(0 )

        snake_case , snake_case = pipe_prior(
            lowercase_,
            generator=lowercase_,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()

        snake_case = pipeline(
            image=lowercase_,
            image_embeds=lowercase_,
            negative_image_embeds=lowercase_,
            generator=lowercase_,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type='np',
        )

        snake_case = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowercase_, lowercase_ )
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the smallest integer greater than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
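The Project Euler solution above fixes one grid dimension a and estimates the other dimension b from the quadratic formula, using the fact that an a-by-b grid contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle number. A minimal sketch of that estimate with readable names (all names here are illustrative, not from the snippet):

from math import sqrt, floor, ceil

def triangle(n: int) -> int:
    # T(n) = n * (n + 1) / 2, the number of ways to pick a span along one axis
    return n * (n + 1) // 2

def estimate_b(target: int, triangle_a: int) -> float:
    # solve T(b) = target / triangle_a, i.e. b * (b + 1) / 2 = target / triangle_a, for b >= 0
    return (-1 + sqrt(1 + 8 * target / triangle_a)) / 2

b_estimate = estimate_b(2_000_000, triangle(36))
print(floor(b_estimate), ceil(b_estimate))  # 77 77 -- a 36 x 77 grid comes closest to two million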
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "spiece.model"} lowerCAmelCase_ = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } lowerCAmelCase_ = { "albert-base-v1": 5_1_2, "albert-large-v1": 5_1_2, "albert-xlarge-v1": 5_1_2, "albert-xxlarge-v1": 5_1_2, "albert-base-v2": 5_1_2, "albert-large-v2": 5_1_2, "albert-xlarge-v2": 5_1_2, "albert-xxlarge-v2": 5_1_2, } lowerCAmelCase_ = "▁" class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, lowercase_, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_="[CLS]", lowercase_="[SEP]", lowercase_="<unk>", lowercase_="[SEP]", lowercase_="<pad>", lowercase_="[CLS]", lowercase_="[MASK]", lowercase_ = None, **lowercase_, ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
snake_case = ( AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_, normalized=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else mask_token ) snake_case = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase_, remove_space=lowercase_, keep_accents=lowercase_, bos_token=lowercase_, eos_token=lowercase_, unk_token=lowercase_, sep_token=lowercase_, pad_token=lowercase_, cls_token=lowercase_, mask_token=lowercase_, sp_model_kwargs=self.sp_model_kwargs, **lowercase_, ) snake_case = do_lower_case snake_case = remove_space snake_case = keep_accents snake_case = vocab_file snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: return len(self.sp_model ) def _lowerCamelCase ( self ) -> int: snake_case = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: snake_case = self.__dict__.copy() snake_case = None return state def __setstate__( self, lowercase_ ) -> Optional[Any]: snake_case = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): snake_case = {} snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self, lowercase_ ) -> List[Any]: if self.remove_space: snake_case = ' '.join(inputs.strip().split() ) else: snake_case = inputs snake_case = outputs.replace('``', '"' ).replace('\'\'', '"' ) if not self.keep_accents: snake_case = unicodedata.normalize('NFKD', lowercase_ ) snake_case = ''.join([c for c in outputs if not unicodedata.combining(lowercase_ )] ) if self.do_lower_case: snake_case = outputs.lower() return outputs def _lowerCamelCase ( self, lowercase_ ) -> List[str]: snake_case = self.preprocess_text(lowercase_ ) snake_case = self.sp_model.encode(lowercase_, out_type=lowercase_ ) snake_case = [] for piece in pieces: if len(lowercase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_, '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: snake_case = cur_pieces[1:] else: snake_case = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowercase_ ) else: new_pieces.append(lowercase_ ) return new_pieces def _lowerCamelCase ( self, lowercase_ ) -> int: return self.sp_model.PieceToId(lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> Tuple: return self.sp_model.IdToPiece(lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> str: snake_case = [] snake_case = '' snake_case = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase_ ) + token snake_case = True snake_case = [] else: current_sub_tokens.append(lowercase_ ) snake_case = False out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( 
token_ids_a=lowercase_, token_ids_a=lowercase_, already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: if not os.path.isdir(lowercase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case = os.path.join( lowercase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_, 'wb' ) as fi: snake_case = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
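The SQuAD dataset class above converts examples to features once and caches the result behind a file lock so that concurrent training processes do not duplicate the work. A minimal sketch of that load-or-build pattern (file name and payload are illustrative):

import os
import torch
from filelock import FileLock

cache_file = "cached_train_features"
with FileLock(cache_file + ".lock"):  # other processes block here until the first one finishes
    if os.path.exists(cache_file):
        payload = torch.load(cache_file)
    else:
        payload = {"features": list(range(3))}  # stand-in for the expensive feature conversion
        torch.save(payload, cache_file)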
'''simple docstring''' import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = UniSpeechSatForSequenceClassification.from_pretrained(A , config=A ) snake_case = downstream_dict['projector.weight'] snake_case = downstream_dict['projector.bias'] snake_case = downstream_dict['model.post_net.linear.weight'] snake_case = downstream_dict['model.post_net.linear.bias'] return model def __magic_name__ ( A , A , A ) -> Optional[Any]: snake_case = UniSpeechSatForAudioFrameClassification.from_pretrained(A , config=A ) snake_case = downstream_dict['model.linear.weight'] snake_case = downstream_dict['model.linear.bias'] return model def __magic_name__ ( A , A , A ) -> int: snake_case = UniSpeechSatForXVector.from_pretrained(A , config=A ) snake_case = downstream_dict['connector.weight'] snake_case = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] snake_case = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] snake_case = downstream_dict['objective.W'] return model @torch.no_grad() def __magic_name__ ( A , A , A , A ) -> Optional[Any]: snake_case = torch.load(A , map_location='cpu' ) snake_case = checkpoint['Downstream'] snake_case = UniSpeechSatConfig.from_pretrained(A ) snake_case = WavaVecaFeatureExtractor.from_pretrained( A , return_attention_mask=A , do_normalize=A ) snake_case = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): snake_case = convert_classification(A , A , A ) elif arch.endswith('ForAudioFrameClassification' ): snake_case = convert_diarization(A , A , A ) elif arch.endswith('ForXVector' ): snake_case = convert_xvector(A , A , A ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: snake_case = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(A ) hf_model.save_pretrained(A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") lowerCAmelCase_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
332
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
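With int(max_value - min_value) + 1 buckets, the integer bucket sort above effectively assigns one bucket per value; a common generalization scales floats into a fixed number of buckets instead. A minimal sketch of that variant (the function name and the bucket count of 10 are illustrative choices):

def bucket_sort_floats(values: list[float], n_buckets: int = 10) -> list[float]:
    if not values:
        return []
    lo, hi = min(values), max(values)
    width = (hi - lo) / n_buckets or 1.0  # avoid zero width when all values are equal
    buckets: list[list[float]] = [[] for _ in range(n_buckets)]
    for v in values:
        # clamp so the maximum value lands in the last bucket instead of overflowing
        buckets[min(int((v - lo) / width), n_buckets - 1)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort_floats([0.42, 4.21, 0.33, 2.01]) == [0.33, 0.42, 2.01, 4.21]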
'''simple docstring''' lowerCAmelCase_ = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}] lowerCAmelCase_ = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
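The bisection snippet above halves the search interval until it is narrower than 0.01, so the loop runs roughly ceil(log2((b - a) / 0.01)) times; for the interval (-2, 5) that is ceil(log2(700)) = 10 halvings. A quick check of that bound:

from math import ceil, log2
print(ceil(log2((5 - (-2)) / 0.01)))  # 10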
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''sew-d''' def __init__( self, lowercase_=32, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_=2, lowercase_=512, lowercase_=256, lowercase_=True, lowercase_=True, lowercase_=("p2c", "c2p"), lowercase_="layer_norm", lowercase_="gelu_python", lowercase_=0.1, lowercase_=0.1, lowercase_=0.1, lowercase_=0.0, lowercase_=0.1, lowercase_=0.02, lowercase_=1E-7, lowercase_=1E-5, lowercase_="group", lowercase_="gelu", lowercase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowercase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowercase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowercase_=False, lowercase_=128, lowercase_=16, lowercase_=True, lowercase_=0.05, lowercase_=10, lowercase_=2, lowercase_=0.0, lowercase_=10, lowercase_=0, lowercase_="mean", lowercase_=False, lowercase_=False, lowercase_=256, lowercase_=0, lowercase_=1, lowercase_=2, **lowercase_, ) -> Optional[Any]: super().__init__(**lowercase_, pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_ ) snake_case = hidden_size snake_case = feat_extract_norm snake_case = feat_extract_activation snake_case = list(lowercase_ ) snake_case = list(lowercase_ ) snake_case = list(lowercase_ ) snake_case = conv_bias snake_case = num_conv_pos_embeddings snake_case = num_conv_pos_embedding_groups snake_case = len(self.conv_dim ) snake_case = num_hidden_layers snake_case = intermediate_size snake_case = squeeze_factor snake_case = max_position_embeddings snake_case = position_buckets snake_case = share_att_key snake_case = relative_attention snake_case = norm_rel_ebd snake_case = list(lowercase_ ) snake_case = hidden_act snake_case = num_attention_heads snake_case = hidden_dropout snake_case = attention_dropout snake_case = activation_dropout snake_case = feat_proj_dropout snake_case = final_dropout snake_case = layer_norm_eps snake_case = feature_layer_norm_eps snake_case = initializer_range snake_case = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case = apply_spec_augment snake_case = mask_time_prob snake_case = mask_time_length snake_case = mask_time_min_masks snake_case = mask_feature_prob snake_case = mask_feature_length snake_case = mask_feature_min_masks # ctc loss snake_case = ctc_loss_reduction snake_case = ctc_zero_infinity # sequence classification snake_case = use_weighted_layer_sum snake_case = classifier_proj_size @property def _lowerCamelCase ( self ) -> Optional[Any]: return functools.reduce(operator.mul, self.conv_stride, 1 )
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''Speech2TextFeatureExtractor''' snake_case_ = '''Speech2TextTokenizer''' def __init__( self, lowercase_, lowercase_ ) -> Union[str, Any]: super().__init__(lowercase_, lowercase_ ) snake_case = self.feature_extractor snake_case = False def __call__( self, *lowercase_, **lowercase_ ) -> Optional[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowercase_, **lowercase_ ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) snake_case = kwargs.pop('raw_speech' ) else: snake_case = kwargs.pop('audio', lowercase_ ) snake_case = kwargs.pop('sampling_rate', lowercase_ ) snake_case = kwargs.pop('text', lowercase_ ) if len(lowercase_ ) > 0: snake_case = args[0] snake_case = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: snake_case = self.feature_extractor(lowercase_, *lowercase_, sampling_rate=lowercase_, **lowercase_ ) if text is not None: snake_case = self.tokenizer(lowercase_, **lowercase_ ) if text is None: return inputs elif audio is None: return encodings else: snake_case = encodings['input_ids'] return inputs def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Any: return self.tokenizer.batch_decode(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> str: return self.tokenizer.decode(*lowercase_, **lowercase_ ) @contextmanager def _lowerCamelCase ( self ) -> int: warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) snake_case = True snake_case = self.tokenizer yield snake_case = self.feature_extractor snake_case = False
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
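The pipe-and-lock machinery above runs odd-even transposition sort with one process per element; the underlying algorithm is easier to see sequentially, where even phases compare pairs (0, 1), (2, 3), ... and odd phases compare (1, 2), (3, 4), .... A minimal single-process sketch (the function name is illustrative):

def odd_even_transposition_seq(arr: list[int]) -> list[int]:
    n = len(arr)
    for phase in range(n):  # n phases are enough to guarantee a sorted result
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_seq(list(range(10, 0, -1))) == list(range(1, 11))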
'''simple docstring''' import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = "bert-base-cased" lowerCAmelCase_ = "google/pegasus-xsum" lowerCAmelCase_ = [" Sam ate lunch today.", "Sams lunch ingredients."] lowerCAmelCase_ = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] lowerCAmelCase_ = "patrickvonplaten/t5-tiny-random" lowerCAmelCase_ = "sshleifer/bart-tiny-random" lowerCAmelCase_ = "sshleifer/tiny-mbart" lowerCAmelCase_ = "sshleifer/tiny-marian-en-de" def __magic_name__ ( A , A ) -> Optional[Any]: snake_case = '\n'.join(A ) Path(A ).open('w' ).writelines(A ) def __magic_name__ ( A ) -> int: for split in ["train", "val", "test"]: _dump_articles(os.path.join(A , F'''{split}.source''' ) , A ) _dump_articles(os.path.join(A , F'''{split}.target''' ) , A ) return tmp_dir class lowerCamelCase ( __lowerCAmelCase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) @slow def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]: snake_case = AutoTokenizer.from_pretrained(lowercase_ ) snake_case = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES ) snake_case = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES ) snake_case = 4 snake_case = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated snake_case , snake_case = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. snake_case = SeqaSeqDataset( lowercase_, data_dir=lowercase_, type_path='train', max_source_length=lowercase_, max_target_length=lowercase_, src_lang=lowercase_, tgt_lang=lowercase_, ) snake_case = DataLoader(lowercase_, batch_size=2, collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowercase_, lowercase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place snake_case = shift_tokens_right(batch['labels'], tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]: snake_case = AutoTokenizer.from_pretrained(lowercase_ ) snake_case = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case = max(len(tokenizer.encode(lowercase_ ) ) for a in ARTICLES ) snake_case = max(len(tokenizer.encode(lowercase_ ) ) for a in SUMMARIES ) snake_case = 4 snake_case = LegacySeqaSeqDataset( lowercase_, data_dir=lowercase_, type_path='train', max_source_length=20, max_target_length=lowercase_, ) snake_case = DataLoader(lowercase_, batch_size=2, collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def _lowerCamelCase ( self ) -> Tuple: snake_case = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) snake_case = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) snake_case = tmp_dir.joinpath('train.source' ).open().readlines() snake_case = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowercase_, lowercase_, 128, lowercase_ ) snake_case = {x.name for x in tmp_dir.iterdir()} snake_case = {x.name for x in save_dir.iterdir()} snake_case = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowercase_ ) < len(lowercase_ ) assert len(lowercase_ ) == 1 assert len(packed_examples[0] ) == sum(len(lowercase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason='This test requires fairseq' ) def _lowerCamelCase ( self ) -> Optional[int]: if not FAIRSEQ_AVAILABLE: return snake_case , snake_case , snake_case = self._get_dataset(max_len=64 ) snake_case = 64 snake_case = ds.make_dynamic_sampler(lowercase_, required_batch_size_multiple=lowercase_ ) snake_case = [len(lowercase_ ) for x in batch_sampler] assert len(set(lowercase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowercase_ ) == len(lowercase_ ) # no dropped or added examples snake_case = DataLoader(lowercase_, batch_sampler=lowercase_, collate_fn=ds.collate_fn, num_workers=2 ) snake_case = [] snake_case = [] for batch in data_loader: snake_case = batch['input_ids'].shape snake_case = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple snake_case = np.product(batch['input_ids'].shape ) num_src_per_batch.append(lowercase_ ) if num_src_tokens > (max_tokens * 
1.1): failures.append(lowercase_ ) assert num_src_per_batch[0] == max(lowercase_ ) if failures: raise AssertionError(F'''too many tokens in {len(lowercase_ )} batches''' ) def _lowerCamelCase ( self ) -> Any: snake_case , snake_case , snake_case = self._get_dataset(max_len=512 ) snake_case = 2 snake_case = ds.make_sortish_sampler(lowercase_, shuffle=lowercase_ ) snake_case = DataLoader(lowercase_, batch_size=lowercase_, collate_fn=ds.collate_fn, num_workers=2 ) snake_case = DataLoader(lowercase_, batch_size=lowercase_, collate_fn=ds.collate_fn, num_workers=2, sampler=lowercase_ ) snake_case = tokenizer.pad_token_id def count_pad_tokens(lowercase_, lowercase_="input_ids" ): return [batch[k].eq(lowercase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowercase_, k='labels' ) ) < sum(count_pad_tokens(lowercase_, k='labels' ) ) assert sum(count_pad_tokens(lowercase_ ) ) < sum(count_pad_tokens(lowercase_ ) ) assert len(lowercase_ ) == len(lowercase_ ) def _lowerCamelCase ( self, lowercase_=1000, lowercase_=128 ) -> List[Any]: if os.getenv('USE_REAL_DATA', lowercase_ ): snake_case = 'examples/seq2seq/wmt_en_ro' snake_case = max_len * 2 * 64 if not Path(lowercase_ ).joinpath('train.len' ).exists(): save_len_file(lowercase_, lowercase_ ) else: snake_case = 'examples/seq2seq/test_data/wmt_en_ro' snake_case = max_len * 4 save_len_file(lowercase_, lowercase_ ) snake_case = AutoTokenizer.from_pretrained(lowercase_ ) snake_case = SeqaSeqDataset( lowercase_, data_dir=lowercase_, type_path='train', max_source_length=lowercase_, max_target_length=lowercase_, n_obs=lowercase_, ) return ds, max_tokens, tokenizer def _lowerCamelCase ( self ) -> Optional[Any]: snake_case , snake_case , snake_case = self._get_dataset() snake_case = set(DistributedSortishSampler(lowercase_, 256, num_replicas=2, rank=0, add_extra_examples=lowercase_ ) ) snake_case = set(DistributedSortishSampler(lowercase_, 256, num_replicas=2, rank=1, add_extra_examples=lowercase_ ) ) assert idsa.intersection(lowercase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) def _lowerCamelCase ( self, lowercase_ ) -> Optional[Any]: snake_case = AutoTokenizer.from_pretrained(lowercase_, use_fast=lowercase_ ) if tok_name == MBART_TINY: snake_case = SeqaSeqDataset( lowercase_, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR', ) snake_case = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: snake_case = SeqaSeqDataset( lowercase_, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path='train', max_source_length=4, max_target_length=8, ) snake_case = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowercase_ ) == 1 if tok_name == BART_TINY else len(lowercase_ ) == 0
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
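The backtracking snippet above builds each permutation by marking used indices in a state-space tree and unwinding; for comparison, the standard library enumerates the same sequences, in the same order, in one call:

from itertools import permutations

for p in permutations(["A", "B", "C"]):
    print(list(p))  # six orderings, matching the state-space tree's output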
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''sew''' def __init__( self, lowercase_=32, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_=2, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=0.1, lowercase_=0.0, lowercase_=0.1, lowercase_=0.1, lowercase_=0.02, lowercase_=1E-5, lowercase_="group", lowercase_="gelu", lowercase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowercase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowercase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowercase_=False, lowercase_=128, lowercase_=16, lowercase_=True, lowercase_=0.05, lowercase_=10, lowercase_=2, lowercase_=0.0, lowercase_=10, lowercase_=0, lowercase_="mean", lowercase_=False, lowercase_=False, lowercase_=256, lowercase_=0, lowercase_=1, lowercase_=2, **lowercase_, ) -> List[Any]: super().__init__(**lowercase_, pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_ ) snake_case = hidden_size snake_case = feat_extract_norm snake_case = feat_extract_activation snake_case = list(lowercase_ ) snake_case = list(lowercase_ ) snake_case = list(lowercase_ ) snake_case = conv_bias snake_case = num_conv_pos_embeddings snake_case = num_conv_pos_embedding_groups snake_case = len(self.conv_dim ) snake_case = num_hidden_layers snake_case = intermediate_size snake_case = squeeze_factor snake_case = hidden_act snake_case = num_attention_heads snake_case = hidden_dropout snake_case = attention_dropout snake_case = activation_dropout snake_case = feat_proj_dropout snake_case = final_dropout snake_case = layerdrop snake_case = layer_norm_eps snake_case = initializer_range snake_case = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case = apply_spec_augment snake_case = mask_time_prob snake_case = mask_time_length snake_case = mask_time_min_masks snake_case = mask_feature_prob snake_case = mask_feature_length snake_case = mask_feature_min_masks # ctc loss snake_case = ctc_loss_reduction snake_case = ctc_zero_infinity # sequence classification snake_case = use_weighted_layer_sum snake_case = classifier_proj_size @property def _lowerCamelCase ( self ) -> Union[str, Any]: return functools.reduce(operator.mul, self.conv_stride, 1 )
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
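Both configuration classes above mainly store hyperparameters; the SEW variant additionally validates that conv_dim, conv_stride, and conv_kernel describe the same number of feature-extractor layers. Assuming that class corresponds to the public SEWConfig in transformers, a sketch of triggering that check:

from transformers import SEWConfig

try:
    SEWConfig(conv_dim=(64, 128), conv_stride=(5,), conv_kernel=(10,))
except ValueError as err:
    print(err)  # the three tuples must have matching lengths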
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCamelCase ( unittest.TestCase ): snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> int: snake_case = hf_hub_download( repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset' ) snake_case = VideoClassificationPipeline(model=lowercase_, image_processor=lowercase_, top_k=2 ) snake_case = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> List[str]: for example in examples: snake_case = video_classifier(lowercase_ ) self.assertEqual( lowercase_, [ {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'label': ANY(lowercase_ )}, ], ) @require_torch def _lowerCamelCase ( self ) -> Optional[int]: snake_case = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' snake_case = VideoMAEFeatureExtractor( size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10} ) snake_case = pipeline( 'video-classification', model=lowercase_, feature_extractor=lowercase_, frame_sampling_rate=4 ) snake_case = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset' ) snake_case = video_classifier(lowercase_, top_k=2 ) self.assertEqual( nested_simplify(lowercase_, decimals=4 ), [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}], ) snake_case = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(lowercase_, decimals=4 ), [ [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}], [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}], ], ) @require_tf def _lowerCamelCase ( self ) -> str: pass
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' 
) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
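The LED tokenizer row above pads `global_attention_mask` with -1 rather than 0, because 0 already means "local attention" there. A minimal standalone sketch of that padding rule (the function name and lists are illustrative, not part of the original):

def pad_global_attention_mask(mask: list[int], target_length: int, padding_side: str = "right") -> list[int]:
    # -1 marks padded positions; 0 means "local attention", 1 means "global attention".
    # Assumes mask is no longer than target_length.
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]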
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self, lowercase_ ) -> Any: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'], model_result['ss'] ): snake_case = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> str: snake_case = 'sgugger/tiny-distilbert-classification' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, only_pretrain_model=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, torchscript=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision' ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sshleifer/tiny-gpt2' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, fpaa=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) # set architectures equal to `None` snake_case = None snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> str: snake_case = 'sshleifer/tiny-gpt2' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu', 'Can\'t do half precision' ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = 'sshleifer/tiny-gpt2' snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], fpaa=lowercase_, multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> str: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tinier_bart' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> str: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> int: snake_case = 'sshleifer/tinier_bart' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, save_to_csv=lowercase_, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(lowercase_, 'inf_time.csv' ), train_memory_csv_file=os.path.join(lowercase_, 'train_mem.csv' ), inference_memory_csv_file=os.path.join(lowercase_, 'inf_mem.csv' ), train_time_csv_file=os.path.join(lowercase_, 'train_time.csv' ), env_info_csv_file=os.path.join(lowercase_, 'env.csv' ), multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_, 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'train_time.csv' ) ).exists() ) 
self.assertTrue(Path(os.path.join(lowercase_, 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'env.csv' ) ).exists() ) def _lowerCamelCase ( self ) -> Tuple: snake_case = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ ): self.assertTrue(hasattr(lowercase_, 'sequential' ) ) self.assertTrue(hasattr(lowercase_, 'cumulative' ) ) self.assertTrue(hasattr(lowercase_, 'current' ) ) self.assertTrue(hasattr(lowercase_, 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(lowercase_, 'log.txt' ), log_print=lowercase_, trace_memory_line_by_line=lowercase_, multi_process=lowercase_, ) snake_case = PyTorchBenchmark(lowercase_ ) snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase_, 'log.txt' ) ).exists() )
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) 
attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', 
F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=__lowerCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    snake_case_ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    snake_case_ = Features({'''text''': Value('''string''' )} )
    snake_case_ = Features({'''summary''': Value('''string''' )} )
    snake_case_ = "text"
    snake_case_ = "summary"

    @property
    def _lowerCamelCase ( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
332
'''simple docstring'''

from pathlib import Path

import fire


def __magic_name__ ( A , A , A ) -> Union[str, Any]:
    snake_case = Path(A )
    snake_case = Path(A )
    dest_dir.mkdir(exist_ok=A )
    for path in src_dir.iterdir():
        snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
        snake_case = dest_dir.joinpath(path.name )
        print(A )
        dest_path.open('w' ).write('\n'.join(A ) )


if __name__ == "__main__":
    fire.Fire(minify)
332
1
'''simple docstring'''

from __future__ import annotations


def __magic_name__ ( A , A , A ) -> int | float:
    if len(A ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(A )
        or left < -len(A )
        or right >= len(A )
        or right < -len(A )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    snake_case = (left + right) >> 1  # the middle
    snake_case = find_max(A , A , A )  # find max in range[left, mid]
    snake_case = find_max(A , mid + 1 , A )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
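For reference, a minimal un-obfuscated sketch of the divide-and-conquer maximum implemented in the row above (the name find_max follows the original; the argument names and the usage check are illustrative):

def find_max(nums: list[float], left: int, right: int) -> float:
    # Split [left, right] at the midpoint and keep the larger half-maximum.
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle index
    left_max = find_max(nums, left, mid)  # max of nums[left..mid]
    right_max = find_max(nums, mid + 1, right)  # max of nums[mid+1..right]
    return left_max if left_max >= right_max else right_max

assert find_max([3, 1, 4, 1, 5], 0, 4) == 5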
'''simple docstring'''

import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
332
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase_ = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetImgaImgPipeline snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] snake_case_ = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def _lowerCamelCase ( self ) -> Optional[int]: return 32 @property def _lowerCamelCase ( self ) -> Dict: return 32 @property def _lowerCamelCase ( self ) -> List[str]: return self.time_input_dim @property def _lowerCamelCase ( self ) -> Tuple: return self.time_input_dim * 4 @property def _lowerCamelCase ( self ) -> str: return 100 @property def _lowerCamelCase ( self ) -> Any: torch.manual_seed(0 ) snake_case = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } snake_case = UNetaDConditionModel(**lowercase_ ) return model @property def _lowerCamelCase ( self ) -> str: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = VQModel(**self.dummy_movq_kwargs ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = self.dummy_unet snake_case = self.dummy_movq snake_case = { 'num_train_timesteps': 1000, 'beta_schedule': 'linear', 'beta_start': 0.00_085, 'beta_end': 0.012, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } snake_case = DDIMScheduler(**lowercase_ ) snake_case = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> Dict: snake_case = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case = 
floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to( lowercase_ ) # create init_image snake_case = floats_tensor((1, 3, 64, 64), rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case = image.cpu().permute(0, 2, 3, 1 )[0] snake_case = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((256, 256) ) # create hint snake_case = floats_tensor((1, 3, 64, 64), rng=random.Random(lowercase_ ) ).to(lowercase_ ) if str(lowercase_ ).startswith('mps' ): snake_case = torch.manual_seed(lowercase_ ) else: snake_case = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case = { 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def _lowerCamelCase ( self ) -> int: snake_case = 'cpu' snake_case = self.get_dummy_components() snake_case = self.pipeline_class(**lowercase_ ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = pipe(**self.get_dummy_inputs(lowercase_ ) ) snake_case = output.images snake_case = pipe( **self.get_dummy_inputs(lowercase_ ), return_dict=lowercase_, )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case = np.array( [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> List[str]: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' ) snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) snake_case = init_image.resize((512, 512) ) snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) snake_case = torch.from_numpy(np.array(lowercase_ ) ).float() / 255.0 snake_case = hint.permute(2, 0, 1 ).unsqueeze(0 ) snake_case = 'A robot, 4k photo' snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa ) pipe_prior.to(lowercase_ ) snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) snake_case = torch.Generator(device='cpu' ).manual_seed(0 ) snake_case , snake_case = pipe_prior( lowercase_, image=lowercase_, strength=0.85, generator=lowercase_, negative_prompt='', ).to_tuple() snake_case = pipeline( image=lowercase_, image_embeds=lowercase_, negative_image_embeds=lowercase_, hint=lowercase_, generator=lowercase_, num_inference_steps=100, height=512, 
width=512, strength=0.5, output_type='np', ) snake_case = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowercase_, lowercase_ )
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - 
expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class lowerCamelCase ( metaclass=__lowerCAmelCase ):
    snake_case_ = ['''note_seq''']

    def __init__( self, *lowercase_, **lowercase_ ) -> str:
        requires_backends(self, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
        requires_backends(cls, ['note_seq'] )
332
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class lowerCamelCase ( metaclass=__lowerCAmelCase ):
    snake_case_ = ['''note_seq''']

    def __init__( self, *lowercase_, **lowercase_ ) -> str:
        requires_backends(self, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'] )

    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
        requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring'''


def __magic_name__ ( ) -> Tuple:
    snake_case = []
    snake_case = 1
    while len(A ) < 1E6:
        constant.append(str(A ) )
        i += 1
    snake_case = ''.join(A )

    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[9_9] )
        * int(constant[9_9_9] )
        * int(constant[9_9_9_9] )
        * int(constant[9_9_9_9_9] )
        * int(constant[9_9_9_9_9_9] )
    )


if __name__ == "__main__":
    print(solution())
332
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


lowerCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.', lowercase_, )
        super().__init__(*lowercase_, **lowercase_ )
332
1
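The Project Euler row above builds the first million digits of Champernowne's constant by concatenating the integers, then multiplies the digits at positions 1, 10, ..., 1000000. A small sketch of the same computation with plain names (the function name and variables are illustrative):

import math

def champernowne_digit_product(limit: int = 10**6) -> int:
    digits = []
    i = 1
    while len(digits) < limit:
        digits.extend(str(i))  # append each digit of i as a separate character
        i += 1
    # d_1 * d_10 * d_100 * ... * d_1000000 (positions are 1-indexed, hence the -1)
    return math.prod(int(digits[10**k - 1]) for k in range(7))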
'''simple docstring'''

from collections.abc import Callable


def __magic_name__ ( A , A , A ) -> float:
    snake_case = a
    snake_case = b
    if function(A ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(A ) == 0:
        return b
    elif (
        function(A ) * function(A ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        snake_case = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7:  # until precisely equals to 10^-7
            if function(A ) == 0:
                return mid
            elif function(A ) * function(A ) < 0:
                snake_case = mid
            else:
                snake_case = mid
            snake_case = start + (end - start) / 2.0
        return mid


def __magic_name__ ( A ) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
332
'''simple docstring'''

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
lowerCAmelCase_ = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
lowerCAmelCase_ = dataset.iloc[:, 1:2].values
lowerCAmelCase_ = dataset.iloc[:, 2].values

lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)

lowerCAmelCase_ = PolynomialFeatures(degree=4)
lowerCAmelCase_ = poly_reg.fit_transform(X)
lowerCAmelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)


def __magic_name__ ( ) -> Any:
    plt.scatter(A , A , color='red' )
    plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
332
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase_ = logging.getLogger(__name__) def __magic_name__ ( A , A ) -> str: return (preds == labels).mean() @dataclass class lowerCamelCase : snake_case_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class lowerCamelCase : snake_case_ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) snake_case_ = field(metadata={'''help''': '''Should contain the data files for the task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __magic_name__ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) try: snake_case = processors[data_args.task_name]() snake_case = processor.get_labels() snake_case = len(A ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets snake_case = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(A ) -> Dict: snake_case = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(A , p.label_ids )} # Data collator snake_case = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case = trainer.evaluate() snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) return results def __magic_name__ ( A ) -> Tuple: # 
For xla_spawn (TPUs) main() if __name__ == "__main__": main()
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: 
self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
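The compression filesystems in the row above are what let fsspec open bz2/gzip/lz4/xz/zstd files transparently; a minimal usage sketch with an illustrative filename (not from the original):

import fsspec

# compression="infer" selects the codec from the file extension (.gz here)
with fsspec.open("example-data.csv.gz", mode="rt", compression="infer") as f:
    first_line = f.readline()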
'''simple docstring'''

from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
    @register_to_config
    def __init__( self, lowercase_ = 768, ) -> str:
        super().__init__()
        snake_case = nn.Parameter(torch.zeros(1, lowercase_ ) )
        snake_case = nn.Parameter(torch.ones(1, lowercase_ ) )

    def _lowerCamelCase ( self, lowercase_ = None, lowercase_ = None, ) -> Optional[int]:
        snake_case = nn.Parameter(self.mean.to(lowercase_ ).to(lowercase_ ) )
        snake_case = nn.Parameter(self.std.to(lowercase_ ).to(lowercase_ ) )
        return self

    def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
        snake_case = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]:
        snake_case = (embeds * self.std) + self.mean
        return embeds
332
'''simple docstring'''

from __future__ import annotations


def __magic_name__ ( A , A , A ) -> int | float:
    if len(A ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(A )
        or left < -len(A )
        or right >= len(A )
        or right < -len(A )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    snake_case = (left + right) >> 1  # the middle
    snake_case = find_max(A , A , A )  # find max in range[left, mid]
    snake_case = find_max(A , mid + 1 , A )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
332
1
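# A usage sketch for the divide-and-conquer find_max above; the list and
# the index pairs are arbitrary examples.
nums = [3, -7, 12, 5, 9]
assert find_max(nums, 0, len(nums) - 1) == 12
# negative indices are accepted as long as they stay within range
assert find_max(nums, -len(nums), -1) == 12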
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) lowerCAmelCase_ = None lowerCAmelCase_ = { "7B": 1_1_0_0_8, "13B": 1_3_8_2_4, "30B": 1_7_9_2_0, "65B": 2_2_0_1_6, "70B": 2_8_6_7_2, } lowerCAmelCase_ = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def __magic_name__ ( A , A=1 , A=2_5_6 ) -> Tuple: return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def __magic_name__ ( A ) -> List[str]: with open(A , 'r' ) as f: return json.load(A ) def __magic_name__ ( A , A ) -> str: with open(A , 'w' ) as f: json.dump(A , A ) def __magic_name__ ( A , A , A , A=True ) -> Optional[Any]: os.makedirs(A , exist_ok=A ) snake_case = os.path.join(A , 'tmp' ) os.makedirs(A , exist_ok=A ) snake_case = read_json(os.path.join(A , 'params.json' ) ) snake_case = NUM_SHARDS[model_size] snake_case = params['n_layers'] snake_case = params['n_heads'] snake_case = n_heads // num_shards snake_case = params['dim'] snake_case = dim // n_heads snake_case = 10_000.0 snake_case = 1.0 / (base ** (torch.arange(0 , A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: snake_case = params['n_kv_heads'] # for GQA / MQA snake_case = n_heads_per_shard // num_key_value_heads snake_case = dim // num_key_value_heads else: # compatibility with other checkpoints snake_case = n_heads snake_case = n_heads_per_shard snake_case = dim # permute for sliced rotary def permute(A , A=n_heads , A=dim , A=dim ): return w.view(A , dima // n_heads // 2 , 2 , A ).transpose(1 , 2 ).reshape(A , A ) print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
snake_case = torch.load(os.path.join(A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded snake_case = [ torch.load(os.path.join(A , F'''consolidated.{i:02d}.pth''' ) , map_location='cpu' ) for i in range(A ) ] snake_case = 0 snake_case = {'weight_map': {}} for layer_i in range(A ): snake_case = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case = { F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wq.weight'''] ), F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wk.weight'''] ), F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''], F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''], F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''], F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''], F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''], F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''], F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. snake_case = { F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.attention_norm.weight''' ].clone(), F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } snake_case = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) ) snake_case = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) , A , A , A , ) snake_case = torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) snake_case = torch.cat( [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(A )] , dim=1 ) snake_case = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(A )] , dim=0 ) snake_case = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(A )] , dim=1 ) snake_case = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(A )] , dim=0 ) snake_case = inv_freq for k, v in state_dict.items(): snake_case = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) snake_case = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: snake_case = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(A )] , dim=1 ), 'lm_head.weight': 
torch.cat([loaded[i]['output.weight'] for i in range(A )] , dim=0 ), } for k, v in state_dict.items(): snake_case = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) # Write configs snake_case = {'total_size': param_count * 2} write_json(A , os.path.join(A , 'pytorch_model.bin.index.json' ) ) snake_case = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 snake_case = params['multiple_of'] if 'multiple_of' in params else 2_5_6 snake_case = LlamaConfig( hidden_size=A , intermediate_size=compute_intermediate_size(A , A , A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=A , ) config.save_pretrained(A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) snake_case = LlamaForCausalLM.from_pretrained(A , torch_dtype=torch.floataa , low_cpu_mem_usage=A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(A , safe_serialization=A ) shutil.rmtree(A ) def __magic_name__ ( A , A ) -> Tuple: # Initialize the tokenizer based on the `spm` model snake_case = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) snake_case = tokenizer_class(A ) tokenizer.save_pretrained(A ) def __magic_name__ ( ) -> List[str]: snake_case = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=A , help='Whether or not to save using `safetensors`.' ) snake_case = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) snake_case = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , A ) if __name__ == "__main__": main()
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
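# A worked check of the intermediate-size rounding used by the LLaMA
# conversion script above: 8n/3 (scaled by ffn_dim_multiplier) is rounded
# up to the nearest multiple of `multiple_of`. The two asserts reproduce
# entries from the size table at the top of that script.
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert compute_intermediate_size(4096) == 11008  # 7B: dim=4096
assert compute_intermediate_size(8192) == 22016  # 65B: dim=8192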
'''simple docstring''' import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def __magic_name__ ( A ) -> str: return EnvironmentCommand() def __magic_name__ ( A ) -> int: return EnvironmentCommand(args.accelerate_config_file ) class lowerCamelCase ( __lowerCAmelCase ): @staticmethod def _lowerCamelCase ( lowercase_ ) -> Dict: snake_case = parser.add_parser('env' ) download_parser.set_defaults(func=lowercase_ ) download_parser.add_argument( '--accelerate-config_file', default=lowercase_, help='The accelerate config file to use for the default values in the launching script.', ) download_parser.set_defaults(func=lowercase_ ) def __init__( self, lowercase_, *lowercase_ ) -> None: snake_case = accelerate_config_file def _lowerCamelCase ( self ) -> List[Any]: snake_case = 'not installed' if is_safetensors_available(): import safetensors snake_case = safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors snake_case = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' snake_case = 'not installed' snake_case = snake_case = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file snake_case = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowercase_ ): snake_case = load_config_from_file(self._accelerate_config_file ).to_dict() snake_case = ( '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowercase_, lowercase_ ) else F'''\t{accelerate_config}''' ) snake_case = 'not installed' snake_case = 'NA' if is_torch_available(): import torch snake_case = torch.__version__ snake_case = torch.cuda.is_available() snake_case = 'not installed' snake_case = 'NA' if is_tf_available(): import tensorflow as tf snake_case = tf.__version__ try: # deprecated in v2.1 snake_case = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool snake_case = bool(tf.config.list_physical_devices('GPU' ) ) snake_case = 'not installed' snake_case = 'not installed' snake_case = 'not installed' snake_case = 'NA' if is_flax_available(): import flax import jax import jaxlib snake_case = flax.__version__ snake_case = jax.__version__ snake_case = jaxlib.__version__ snake_case = jax.lib.xla_bridge.get_backend().platform snake_case = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'''{safetensors_version}''', 'Accelerate version': F'''{accelerate_version}''', 'Accelerate config': F'''{accelerate_config_str}''', 'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''', 'Tensorflow version (GPU?)': F'''{tf_version} ({tf_cuda_available})''', 'Flax version (CPU?/GPU?/TPU?)': F'''{flax_version} ({jax_backend})''', 'Jax version': F'''{jax_version}''', 'JaxLib version': F'''{jaxlib_version}''', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) 
print(self.format_dict(lowercase_ ) ) return info @staticmethod def _lowerCamelCase ( lowercase_ ) -> int: return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
332
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count T(a) * T(b) is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
332
1
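# The search above exploits that an a x b grid contains T(a) * T(b)
# rectangles, where T(k) = k * (k + 1) // 2 is the k-th triangle number.
# A small sanity check, assuming that identity:
def rectangles_in_grid(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

assert rectangles_in_grid(3, 2) == 18  # a 3x2 grid contains 18 rectangles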
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count T(a) * T(b) is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
332
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase_ = 2_5_0_0_0_4 lowerCAmelCase_ = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = MBartaaTokenizer snake_case_ = MBartaaTokenizerFast snake_case_ = True snake_case_ = True def _lowerCamelCase ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case = MBartaaTokenizer(lowercase_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self ) -> str: snake_case = '<s>' snake_case = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ), lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ), lowercase_ ) def _lowerCamelCase ( self ) -> List[str]: snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '<s>' ) self.assertEqual(vocab_keys[1], '<pad>' ) self.assertEqual(vocab_keys[-1], '<mask>' ) self.assertEqual(len(lowercase_ ), 1054 ) def _lowerCamelCase ( self ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size, 1054 ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = MBartaaTokenizer(lowercase_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=lowercase_ ) snake_case = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_, ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], ) snake_case = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ], ) snake_case = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], ) @slow def _lowerCamelCase ( self ) -> str: # fmt: off snake_case = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_, model_name='facebook/mbart-large-50', 
revision='d3913889c59cd5c9e456b269c376325eabad57e2', ) def _lowerCamelCase ( self ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case = self.rust_tokenizer_class.from_pretrained(lowercase_, **lowercase_ ) snake_case = self.tokenizer_class.from_pretrained(lowercase_, **lowercase_ ) snake_case = tempfile.mkdtemp() snake_case = tokenizer_r.save_pretrained(lowercase_ ) snake_case = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) snake_case = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(lowercase_, lowercase_ ) # Checks everything loads correctly in the same way snake_case = tokenizer_r.from_pretrained(lowercase_ ) snake_case = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=True snake_case = tempfile.mkdtemp() snake_case = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ ) snake_case = tokenizer_p.save_pretrained(lowercase_ ) # Checks it save with the same files self.assertSequenceEqual(lowercase_, lowercase_ ) # Checks everything loads correctly in the same way snake_case = tokenizer_r.from_pretrained(lowercase_ ) snake_case = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) shutil.rmtree(lowercase_ ) # Save tokenizer rust, legacy_format=False snake_case = tempfile.mkdtemp() snake_case = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_ ) snake_case = tokenizer_p.save_pretrained(lowercase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case = tokenizer_r.from_pretrained(lowercase_ ) snake_case = tokenizer_p.from_pretrained(lowercase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase_, lowercase_ ) ) shutil.rmtree(lowercase_ ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): snake_case_ = '''facebook/mbart-large-50-one-to-many-mmt''' snake_case_ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] snake_case_ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' 
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] snake_case_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def _lowerCamelCase ( cls ) -> str: snake_case = MBartaaTokenizer.from_pretrained( cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO' ) snake_case = 1 return cls def _lowerCamelCase ( self ) -> str: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'], 250038 ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowercase_ ) def _lowerCamelCase ( self ) -> List[Any]: self.assertIn(lowercase_, self.tokenizer.all_special_ids ) snake_case = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] snake_case = self.tokenizer.decode(lowercase_, skip_special_tokens=lowercase_ ) snake_case = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowercase_ ) self.assertEqual(lowercase_, lowercase_ ) self.assertNotIn(self.tokenizer.eos_token, lowercase_ ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0], lowercase_ ) snake_case = 10 snake_case = self.tokenizer(lowercase_, max_length=lowercase_, truncation=lowercase_ ).input_ids[0] self.assertEqual(ids[0], lowercase_ ) self.assertEqual(ids[-1], 2 ) self.assertEqual(len(lowercase_ ), lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ), [250053, 250001] ) def _lowerCamelCase ( self ) -> int: snake_case = tempfile.mkdtemp() snake_case = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase_ ) snake_case = MBartaaTokenizer.from_pretrained(lowercase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowercase_ ) @require_torch def _lowerCamelCase ( self ) -> List[str]: snake_case = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowercase_, return_tensors='pt' ) snake_case = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def _lowerCamelCase ( self ) -> List[str]: snake_case = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=len(self.expected_src_tokens ), return_tensors='pt', ) snake_case = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase_, lowercase_ ) self.assertEqual((2, 14), batch.input_ids.shape ) self.assertEqual((2, 14), batch.attention_mask.shape ) snake_case = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowercase_ ) self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] ) 
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.tokenizer(self.src_text, padding=lowercase_, truncation=lowercase_, max_length=3, return_tensors='pt' ) snake_case = self.tokenizer( text_target=self.tgt_text, padding=lowercase_, truncation=lowercase_, max_length=10, return_tensors='pt' ) snake_case = targets['input_ids'] snake_case = shift_tokens_right(lowercase_, self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1], 3 ) self.assertEqual(batch.decoder_input_ids.shape[1], 10 ) @require_torch def _lowerCamelCase ( self ) -> List[str]: snake_case = self.tokenizer._build_translation_inputs( 'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(lowercase_ ), { # en_XX, A, test, EOS 'input_ids': [[250004, 62, 3034, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 250001, }, )
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, 
F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
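# A minimal sketch of the FileLock pattern used by the SQuAD dataset class
# above to keep concurrent processes from rebuilding the same feature
# cache; the path is illustrative.
from filelock import FileLock

cache_path = "cached_features.bin"
with FileLock(cache_path + ".lock"):
    # only one process at a time enters here; others block until release
    pass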
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively with the Jacobi method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")
    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")
    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
332
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
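# A small worked example of plain Jacobi iteration on a strictly
# diagonally dominant system, the condition the helper above enforces;
# the matrix and right-hand side are arbitrary illustrative values.
import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([6.0, 8.0, 7.0])
x = np.zeros(3)
for _ in range(50):
    # x_new[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i]
    x = (b - (A - np.diag(np.diag(A))) @ x) / np.diag(A)
print(x)  # converges toward the exact solution [1.0, 1.0, 1.0]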
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : def __init__( self, lowercase_, lowercase_=13, lowercase_=32, lowercase_=3, lowercase_=4, lowercase_=[10, 20, 30, 40], lowercase_=[2, 2, 3, 2], lowercase_=True, lowercase_=True, lowercase_=37, lowercase_="gelu", lowercase_=10, lowercase_=0.02, lowercase_=["stage2", "stage3", "stage4"], lowercase_=3, lowercase_=None, ) -> Union[str, Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = num_channels snake_case = num_stages snake_case = hidden_sizes snake_case = depths snake_case = is_training snake_case = use_labels snake_case = intermediate_size snake_case = hidden_act snake_case = type_sequence_label_size snake_case = initializer_range snake_case = out_features snake_case = num_labels snake_case = scope snake_case = num_stages def _lowerCamelCase ( self ) -> str: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self ) -> int: return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def _lowerCamelCase ( self ) -> Dict: return UperNetConfig( backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowercase_, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowercase_, loss_ignore_index=255, num_labels=self.num_labels, ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> List[str]: snake_case = UperNetForSemanticSegmentation(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _lowerCamelCase ( self ) -> str: snake_case = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ) = config_and_inputs snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): snake_case_ = (UperNetForSemanticSegmentation,) if is_torch_available() else () snake_case_ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False 
snake_case_ = False def _lowerCamelCase ( self ) -> str: snake_case = UperNetModelTester(self ) snake_case = ConfigTester(self, config_class=lowercase_, has_text_modality=lowercase_, hidden_size=37 ) def _lowerCamelCase ( self ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self ) -> Any: return def _lowerCamelCase ( self ) -> int: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(lowercase_ ) snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1], lowercase_ ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowercase_ ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def _lowerCamelCase ( self ) -> List[str]: pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def _lowerCamelCase ( self ) -> int: pass @unittest.skip(reason='UperNet does not have a base model' ) def _lowerCamelCase ( self ) -> Any: pass @unittest.skip(reason='UperNet does not have a base model' ) def _lowerCamelCase ( self ) -> Any: pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _lowerCamelCase ( self ) -> Optional[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _lowerCamelCase ( self ) -> Optional[Any]: pass def _lowerCamelCase ( self ) -> Optional[int]: def check_hidden_states_output(lowercase_, lowercase_, lowercase_ ): snake_case = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): snake_case = model(**self._prepare_for_class(lowercase_, lowercase_ ) ) snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case = self.model_tester.num_stages self.assertEqual(len(lowercase_ ), expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = True check_hidden_states_output(lowercase_, lowercase_, lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case = True check_hidden_states_output(lowercase_, lowercase_, lowercase_ ) def _lowerCamelCase ( self ) -> Any: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = _config_zero_init(lowercase_ ) snake_case = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: snake_case = model_class(config=lowercase_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', ) @unittest.skip(reason='UperNet does not have tied weights' ) def _lowerCamelCase ( self ) -> Optional[int]: pass @slow def _lowerCamelCase ( self ) -> Optional[Any]: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = UperNetForSemanticSegmentation.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __magic_name__ ( ) -> str: snake_case = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) snake_case = Image.open(A ).convert('RGB' ) return image @require_torch @require_vision @slow class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Tuple: snake_case = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) snake_case = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(lowercase_ ) snake_case = prepare_img() snake_case = processor(images=lowercase_, return_tensors='pt' ).to(lowercase_ ) with torch.no_grad(): snake_case = model(**lowercase_ ) snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape, lowercase_ ) snake_case = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowercase_, atol=1E-4 ) ) def _lowerCamelCase ( self ) -> Dict: snake_case = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) snake_case = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(lowercase_ ) snake_case = prepare_img() snake_case = processor(images=lowercase_, return_tensors='pt' ).to(lowercase_ ) with torch.no_grad(): snake_case = model(**lowercase_ ) snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) 
self.assertEqual(outputs.logits.shape, lowercase_ ) snake_case = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowercase_, atol=1E-4 ) )
332
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers by distributing them into integer-width buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
332
1
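# A usage sketch for bucket_sort above with floats; each bucket spans one
# unit, so values sharing a bucket are ordered by the final per-bucket sort.
print(bucket_sort([0.42, 4.21, -3.3, 2.0, 0.0]))  # [-3.3, 0.0, 0.42, 2.0, 4.21]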
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def __magic_name__ ( A ) -> List[Any]: snake_case = 3_8_4 if "tiny" in model_name: snake_case = [3, 3, 9, 3] snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8] if "small" in model_name: snake_case = [3, 3, 2_7, 3] snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8] if "base" in model_name: snake_case = [3, 3, 2_7, 3] snake_case = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4] snake_case = 5_1_2 if "large" in model_name: snake_case = [3, 3, 2_7, 3] snake_case = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6] snake_case = 7_6_8 if "xlarge" in model_name: snake_case = [3, 3, 2_7, 3] snake_case = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] snake_case = 1_0_2_4 # set label information snake_case = 1_5_0 snake_case = 'huggingface/label-files' snake_case = 'ade20k-id2label.json' snake_case = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = {v: k for k, v in idalabel.items()} snake_case = ConvNextConfig( depths=A , hidden_sizes=A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) snake_case = UperNetConfig( backbone_config=A , auxiliary_in_channels=A , num_labels=A , idalabel=A , labelaid=A , ) return config def __magic_name__ ( A ) -> Dict: snake_case = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', 
            F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
        rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
        rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
        rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ]
    )
    # fmt: on
    return rename_keys
def __magic_name__ ( A , A , A ) -> int:
    snake_case = dct.pop(A )
    snake_case = val
def __magic_name__ ( A , A , A ) -> Optional[int]:
    snake_case = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    snake_case = model_name_to_url[model_name]
    snake_case = torch.hub.load_state_dict_from_url(A , map_location='cpu' )['state_dict']
    snake_case = get_upernet_config(A )
    snake_case = UperNetForSemanticSegmentation(A )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        snake_case = state_dict.pop(A )
        if "bn" in key:
            snake_case = key.replace('bn' , 'batch_norm' )
        snake_case = val
    # rename keys
    snake_case = create_rename_keys(A )
    for src, dest in rename_keys:
        rename_key(A , A , A )
    model.load_state_dict(A )
    # verify on image
    snake_case = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    snake_case = Image.open(requests.get(A , stream=A ).raw ).convert('RGB' )
    snake_case = SegformerImageProcessor()
    snake_case = processor(A , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        snake_case = model(A )
    if model_name == "upernet-convnext-tiny":
        snake_case = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
    elif model_name == "upernet-convnext-small":
        snake_case = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
    elif model_name == "upernet-convnext-base":
        snake_case = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
    elif model_name == "upernet-convnext-large":
        snake_case = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
    elif model_name == "upernet-convnext-xlarge":
        snake_case = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , A , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(A )
    if push_to_hub:
        print(F'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(F'''openmmlab/{model_name}''' )
        processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
332
'''simple docstring'''
def __magic_name__ ( A ) -> float:
    return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(A ) * equation(A ) >= 0:
        raise ValueError('Wrong space!' )
    snake_case = a
    while (b - a) >= 0.01:
        # Find middle point
        snake_case = (a + b) / 2
        # Check if middle point is root
        if equation(A ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(A ) * equation(A ) < 0:
            snake_case = c
        else:
            snake_case = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
332
1
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase_ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
    snake_case = dataset_loading_script_name
    snake_case = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=A )
    snake_case = script_dir / F'''{script_name}.py'''
    with open(A , 'w' ) as f:
        f.write(A )
    return str(A )
332
1
'''simple docstring'''
import math
import unittest
def __magic_name__ ( A ) -> bool:
    assert isinstance(A , A ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> List[str]:
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(11 ) )
        self.assertTrue(is_prime(13 ) )
        self.assertTrue(is_prime(17 ) )
        self.assertTrue(is_prime(19 ) )
        self.assertTrue(is_prime(23 ) )
        self.assertTrue(is_prime(29 ) )
    def _lowerCamelCase ( self ) -> List[str]:
        with self.assertRaises(lowercase_ ):
            is_prime(-19 )
        self.assertFalse(
            is_prime(0 ),
            'Zero doesn\'t have any positive factors, primes must have exactly two.',
        )
        self.assertFalse(
            is_prime(1 ),
            'One only has 1 positive factor, primes must have exactly two.',
        )
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
    unittest.main()
332
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 1_0 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(A )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            snake_case = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            snake_case = min(A , A )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(A )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            snake_case = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            snake_case = max(A , A )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
    snake_case = []
    snake_case = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    snake_case = Pipe()
    snake_case = Pipe()
    process_array_.append(
        Process(
            target=A ,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) ,
        ) )
    snake_case = temp_rs
    snake_case = temp_rr
    for i in range(1 , len(A ) - 1 ):
        snake_case = Pipe()
        snake_case = Pipe()
        process_array_.append(
            Process(
                target=A ,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) ,
            ) )
        snake_case = temp_rs
        snake_case = temp_rr
    process_array_.append(
        Process(
            target=A ,
            args=(
                len(A ) - 1,
                arr[len(A ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(A ) - 1],
            ) ,
        ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(A ) ):
        snake_case = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def __magic_name__ ( ) -> Tuple:
    snake_case = list(range(1_0 , 0 , -1 ) )
    print('Initial List' )
    print(*A )
    snake_case = odd_even_transposition(A )
    print('Sorted List\n' )
    print(*A )
if __name__ == "__main__":
    main()
332
1
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
    def __init__( self, *lowercase_, **lowercase_ ) -> None:
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            lowercase_,
        )
        super().__init__(*lowercase_, **lowercase_ )
332
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
    create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
    if index == len(A ):
        print(A )
        return
    for i in range(len(A ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            snake_case = True
            create_state_space_tree(A , A , index + 1 , A )
            current_sequence.pop()
            snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
332
1
'''simple docstring'''
import os
from collections.abc import Iterator
def __magic_name__ ( A = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(A ):
        snake_case = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(A )[1] in (".py", ".ipynb"):
                yield os.path.join(A , A ).lstrip('./' )
def __magic_name__ ( A ) -> Dict:
    return F'''{i * " "}*''' if i else "\n##"
def __magic_name__ ( A , A ) -> str:
    snake_case = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(A ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(A )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def __magic_name__ ( A = "." ) -> None:
    snake_case = ''
    for filepath in sorted(good_file_paths(A ) ):
        snake_case , snake_case = os.path.split(A )
        if filepath != old_path:
            snake_case = print_path(A , A )
        snake_case = (filepath.count(os.sep ) + 1) if filepath else 0
        snake_case = F'''{filepath}/{filename}'''.replace(' ' , '%20' )
        snake_case = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
        print(F'''{md_prefix(A )} [{filename}]({url})''' )
if __name__ == "__main__":
    print_directory_md(".")
332
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''roberta'''
    def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
        super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
        snake_case = vocab_size
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = hidden_act
        snake_case = intermediate_size
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = max_position_embeddings
        snake_case = type_vocab_size
        snake_case = initializer_range
        snake_case = layer_norm_eps
        snake_case = position_embedding_type
        snake_case = use_cache
        snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
    @property
    def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            snake_case = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
332
1
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    snake_case_ = field(
        default=128 ,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    snake_case_ = field(
        default=128 ,
        metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
    snake_case_ = field(
        default=64 ,
        metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        } , )
    snake_case_ = field(
        default=30 ,
        metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        } , )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    snake_case_ = field(
        default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    snake_case_ = field(
        default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    snake_case_ = field(
        default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    snake_case_ = field(
        default=0 ,
        metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        } , )
    snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = '''train'''
    snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = 42
    snake_case_ = 42
    def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
        snake_case = args
        snake_case = is_language_sensitive
        snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(lowercase_, lowercase_ ):
            try:
                snake_case = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        snake_case = mode
        # Load data features from cache or dataset file
        snake_case = 'v2' if args.version_2_with_negative else 'v1'
        snake_case = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''',
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case = cached_features_file + '.lock'
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not args.overwrite_cache:
                snake_case = time.time()
                snake_case = torch.load(lowercase_ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case = self.old_features['features']
                snake_case = self.old_features.get('dataset', lowercase_ )
                snake_case = self.old_features.get('examples', lowercase_ )
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ' future run' )
            else:
                if mode == Split.dev:
                    snake_case = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case = self.processor.get_train_examples(args.data_dir )
                snake_case , snake_case = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=lowercase_,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=lowercase_,
                )
                snake_case = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples},
                    lowercase_,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
    def __len__( self ) -> Tuple:
        return len(self.features )
    def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        snake_case = self.features[i]
        snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
        snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
        snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
        snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
        snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
        snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
        snake_case = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            snake_case = torch.tensor(feature.start_position, dtype=torch.long )
            snake_case = torch.tensor(feature.end_position, dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
        return inputs
332
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
lowerCAmelCase_ = {
    "allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = LEDTokenizer
    snake_case_ = ['''input_ids''', '''attention_mask''']
    def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
        super().__init__(
            lowercase_,
            lowercase_,
            tokenizer_file=lowercase_,
            errors=lowercase_,
            bos_token=lowercase_,
            eos_token=lowercase_,
            sep_token=lowercase_,
            cls_token=lowercase_,
            unk_token=lowercase_,
            pad_token=lowercase_,
            mask_token=lowercase_,
            add_prefix_space=lowercase_,
            trim_offsets=lowercase_,
            **lowercase_,
        )
        snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
            snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
            snake_case = add_prefix_space
            snake_case = pre_tok_class(**lowercase_ )
        snake_case = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case = 'post_processor'
        snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
        if tokenizer_component_instance:
            snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case = tuple(state['sep'] )
            if "cls" in state:
                snake_case = tuple(state['cls'] )
            snake_case = False
            if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
                snake_case = add_prefix_space
                snake_case = True
            if state.get('trim_offsets', lowercase_ ) != trim_offsets:
                snake_case = trim_offsets
                snake_case = True
            if changes_to_apply:
                snake_case = getattr(lowercase_, state.pop('type' ) )
                snake_case = component_class(**lowercase_ )
                setattr(self.backend_tokenizer, lowercase_, lowercase_ )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def _lowerCamelCase ( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def _lowerCamelCase ( self, lowercase_ ) -> Any:
        snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
        snake_case = value
    def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*lowercase_, **lowercase_ )
    def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
        snake_case = kwargs.get('is_split_into_words', lowercase_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*lowercase_, **lowercase_ )
    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
        snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
        return tuple(lowercase_ )
    def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
        snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
        snake_case = super()._pad(
            encoded_inputs=lowercase_,
            max_length=lowercase_,
            padding_strategy=lowercase_,
            pad_to_multiple_of=lowercase_,
            return_attention_mask=lowercase_,
        )
        # Load from model defaults
        if return_attention_mask is None:
            snake_case = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
            if needs_to_be_padded:
                snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    snake_case = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    snake_case = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
332
1
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
    snake_case = []
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''') )
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''') )
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''') )
    embed.append((F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''') )
    return embed
def __magic_name__ ( A , A ) -> Optional[int]:
    snake_case = []
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
    attention_weights.append((F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
    return attention_weights
def __magic_name__ ( A ) -> List[Any]:
    snake_case = []
    token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
    return token
def __magic_name__ ( ) -> Dict:
    snake_case = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head
def __magic_name__ ( A , A , A , A ) -> int:
    snake_case = 'imagenet-1k-id2label.json'
    snake_case = 1_0_0_0
    snake_case = 'huggingface/label-files'
    snake_case = num_labels
    snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
    snake_case = {int(A ): v for k, v in idalabel.items()}
    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}
    snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        snake_case = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        snake_case = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        snake_case = [2, 2, 2_0]
        snake_case = [3, 1_2, 1_6]
        snake_case = [1_9_2, 7_6_8, 1_0_2_4]
    snake_case = CvtForImageClassification(A )
    snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    snake_case = image_size
    snake_case = torch.load(A , map_location=torch.device('cpu' ) )
    snake_case = OrderedDict()
    snake_case = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            snake_case = list_of_state_dict + cls_token(A )
        snake_case = list_of_state_dict + embeddings(A )
        for cnt in range(config.depth[idx] ):
            snake_case = list_of_state_dict + attention(A , A )
    snake_case = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(A )
    for i in range(len(A ) ):
        snake_case = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(A )
    model.save_pretrained(A )
    image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=3_8_4,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring'''
def __magic_name__ ( A , A ) -> Any:
    _enforce_args(A , A )
    if n == 0:
        return 0
    snake_case = float('-inf' )
    for i in range(1 , n + 1 ):
        snake_case = max(
            A , prices[i - 1] + naive_cut_rod_recursive(n - i , A ) )
    return max_revue
def __magic_name__ ( A , A ) -> Optional[Any]:
    _enforce_args(A , A )
    snake_case = [float('-inf' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(A , A , A )
def __magic_name__ ( A , A , A ) -> int:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        snake_case = float('-inf' )
        for i in range(1 , n + 1 ):
            snake_case = max(
                A ,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i , A , A ) ,
            )
        snake_case = max_revenue
    return max_rev[n]
def __magic_name__ ( A , A ) -> Dict:
    _enforce_args(A , A )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    snake_case = [float('-inf' ) for _ in range(n + 1 )]
    snake_case = 0
    for i in range(1 , n + 1 ):
        snake_case = max_rev[i]
        for j in range(1 , i + 1 ):
            snake_case = max(A , prices[j - 1] + max_rev[i - j] )
        snake_case = max_revenue_i
    return max_rev[n]
def __magic_name__ ( A , A ) -> str:
    if n < 0:
        snake_case = F'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(A )
    if n > len(A ):
        snake_case = (
            'Each integral piece of rod must have a corresponding price. '
            F'''Got n = {n} but length of prices = {len(A )}'''
        )
        raise ValueError(A )
def __magic_name__ ( ) -> Union[str, Any]:
    snake_case = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
    snake_case = len(A )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    snake_case = 3_6
    snake_case = top_down_cut_rod(A , A )
    snake_case = bottom_up_cut_rod(A , A )
    snake_case = naive_cut_rod_recursive(A , A )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
332
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
    snake_case = Path(A )
    snake_case = Path(A )
    dest_dir.mkdir(exist_ok=A )
    for path in src_dir.iterdir():
        snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
        snake_case = dest_dir.joinpath(path.name )
        print(A )
        dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
    fire.Fire(minify)
332
1
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_ = logging.getLogger()
def __magic_name__ ( A ) -> int:
    snake_case = {}
    snake_case = os.path.join(A , 'all_results.json' )
    if os.path.exists(A ):
        with open(A , 'r' ) as f:
            snake_case = json.load(A )
    else:
        raise ValueError(F'''can\'t find {path}''' )
    return results
lowerCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCamelCase ( __lowerCAmelCase ):
    def _lowerCamelCase ( self ) -> str:
        import xla_spawn
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split()
        with patch.object(lowercase_, 'argv', lowercase_ ):
            snake_case = time()
            xla_spawn.main()
            snake_case = time()
            snake_case = get_results(lowercase_ )
            self.assertGreaterEqual(result['eval_accuracy'], 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500 )
    def _lowerCamelCase ( self ) -> Any:
        import xla_spawn
        snake_case = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(lowercase_, 'argv', lowercase_ ):
            xla_spawn.main()
332
'''simple docstring'''
import os
import pytest
from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
    inspect_dataset(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
    inspect_metric(A , A )
    snake_case = path + '.py'
    assert script_name in os.listdir(A )
    assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_config_info(A , config_name=A )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] , )
def __magic_name__ ( A , A , A ) -> Any:
    with pytest.raises(A ):
        get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
    'path, expected' ,
    [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def __magic_name__ ( A , A ) -> Dict:
    snake_case = get_dataset_config_names(A )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' ,
    [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def __magic_name__ ( A , A , A ) -> List[str]:
    snake_case = get_dataset_infos(A )
    assert list(infos.keys() ) == expected_configs
    snake_case = expected_configs[0]
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' ,
    [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def __magic_name__ ( A , A , A ) -> Any:
    snake_case = get_dataset_infos(A )
    assert expected_config in infos
    snake_case = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' ,
    [
        ('paws', None, ValueError),
    ] , )
def __magic_name__ ( A , A , A ) -> int:
    with pytest.raises(A ):
        get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring'''
from __future__ import annotations
from math import gcd
def __magic_name__ ( A , A = 2 , A = 1 , A = 3 , ) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError('The input value cannot be less than 2' )
    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2
    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(A , A , A ) -> int:
        return (pow(A , 2 ) + step) % modulus
    for _ in range(A ):
        # These track the position within the cycle detection logic.
        snake_case = seed
        snake_case = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            snake_case = rand_fn(A , A , A )
            snake_case = rand_fn(A , A , A )
            snake_case = rand_fn(A , A , A )
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            snake_case = gcd(hare - tortoise , A )
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor
        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.
        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        snake_case = hare
        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1
    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    lowerCAmelCase_ = parser.parse_args()
    lowerCAmelCase_ = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        lowerCAmelCase_ = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
332
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys
    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 12
    @property
    def _lowerCamelCase ( self ) -> Dict:
        return 12
    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        return 32
    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        torch.manual_seed(0 )
        snake_case = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model
    @property
    def _lowerCamelCase ( self ) -> List[Any]:
        snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def _lowerCamelCase ( self ) -> Tuple:
        torch.manual_seed(0 )
        snake_case = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(lowercase_ )
    @property
    def _lowerCamelCase ( self ) -> str:
        torch.manual_seed(0 )
        snake_case = 12
        snake_case = 12
        snake_case = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        snake_case = TransformeraDModel(**lowercase_ )
        return model
    def _lowerCamelCase ( self ) -> Tuple:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_,
            text_encoder=lowercase_,
            tokenizer=lowercase_,
            transformer=lowercase_,
            scheduler=lowercase_,
            learned_classifier_free_sampling_embeddings=lowercase_,
        )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        snake_case = 'teddy bear playing in the pool'
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def _lowerCamelCase ( self ) -> Optional[Any]:
        snake_case = 'cpu'
        snake_case = self.dummy_vqvae
        snake_case = self.dummy_text_encoder
        snake_case = self.dummy_tokenizer
        snake_case = self.dummy_transformer
        snake_case = VQDiffusionScheduler(self.num_embed )
        snake_case = LearnedClassifierFreeSamplingEmbeddings(
            learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
        snake_case = VQDiffusionPipeline(
            vqvae=lowercase_,
            text_encoder=lowercase_,
            tokenizer=lowercase_,
            transformer=lowercase_,
            scheduler=lowercase_,
            learned_classifier_free_sampling_embeddings=lowercase_,
        )
        snake_case = pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        snake_case = 'teddy bear playing in the pool'
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
        snake_case = output.images
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipe(
            [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    def _lowerCamelCase ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _lowerCamelCase ( self ) -> str:
        snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
        snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        snake_case = pipeline.to(lowercase_ )
        pipeline.set_progress_bar_config(disable=lowercase_ )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
        snake_case = pipeline(
            'teddy bear playing in the pool',
            num_images_per_prompt=1,
            generator=lowercase_,
            output_type='np',
        )
        snake_case = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
    snake_case = dataset_loading_script_name
    snake_case = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=A )
    snake_case = script_dir / F'''{script_name}.py'''
    with open(A , 'w' ) as f:
        f.write(A )
    return str(A )
332
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
    snake_case_ = ['''note_seq''']
    def __init__( self, *lowercase_, **lowercase_ ) -> str:
        requires_backends(self, ['note_seq'] )
    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'] )
    @classmethod
    def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
        requires_backends(cls, ['note_seq'] )
332
1