code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ = get_tests_dir("fixtures") class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> List[str]: # A mock response for an HTTP head request to emulate server down snake_case = mock.Mock() snake_case = 500 snake_case = {} snake_case = HTTPError snake_case = {} # Download this model to make sure it's in the cache. snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=lowercase_ ) as mock_head: snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def _lowerCamelCase ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 snake_case = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class lowerCamelCase ( unittest.TestCase ): @classmethod def _lowerCamelCase ( cls ) -> Union[str, Any]: snake_case = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def _lowerCamelCase ( cls ) -> Optional[int]: try: delete_repo(token=cls._token, repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def _lowerCamelCase ( self ) -> Dict: snake_case = WavaVecaFeatureExtractor.from_pretrained(lowercase_ ) feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token ) snake_case = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) ) # Reset repo delete_repo(token=self._token, repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowercase_, repo_id='test-feature-extractor', push_to_hub=lowercase_, use_auth_token=self._token ) snake_case = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) ) def _lowerCamelCase ( self ) -> Tuple: snake_case = WavaVecaFeatureExtractor.from_pretrained(lowercase_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token ) snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( lowercase_, repo_id='valid_org/test-feature-extractor-org', push_to_hub=lowercase_, use_auth_token=self._token ) snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) ) def _lowerCamelCase ( self ) -> Any: CustomFeatureExtractor.register_for_auto_class() snake_case = CustomFeatureExtractor.from_pretrained(lowercase_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}, ) snake_case = AutoFeatureExtractor.from_pretrained( F'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=lowercase_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor' )
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowerCamelCase : def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Dict: snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = scope snake_case = self.vocab_size - 1 def _lowerCamelCase ( self ) -> int: snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) snake_case = None if self.use_token_type_ids: snake_case = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) snake_case = ids_tensor([self.batch_size], self.num_choices ) snake_case = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, ) snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> int: snake_case = OpenAIGPTModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, token_type_ids=lowercase_, head_mask=lowercase_ ) snake_case = model(lowercase_, token_type_ids=lowercase_ ) snake_case = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> Optional[int]: snake_case = OpenAIGPTLMHeadModel(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> List[Any]: snake_case = OpenAIGPTDoubleHeadsModel(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, *lowercase_ ) -> int: snake_case = self.num_labels snake_case = OpenAIGPTForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size ) snake_case = model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self ) -> List[str]: snake_case = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) = config_and_inputs snake_case = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): snake_case_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case_ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> Union[str, Any]: snake_case = super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=lowercase_, ) snake_case = inputs_dict['labels'] snake_case = inputs_dict['labels'] snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=lowercase_, ) snake_case = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowercase_ ) return inputs_dict def _lowerCamelCase ( self ) -> int: snake_case = OpenAIGPTModelTester(self ) snake_case = ConfigTester(self, config_class=lowercase_, n_embd=37 ) def _lowerCamelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowercase_ ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase_ ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowercase_ ) def _lowerCamelCase ( self ) -> List[str]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ ) @slow def _lowerCamelCase ( self ) -> List[str]: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = OpenAIGPTModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_torch class lowerCamelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self ) -> str: snake_case = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowercase_ ) snake_case = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=lowercase_ ) # the president is snake_case = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the snake_case = model.generate(lowercase_, do_sample=lowercase_ ) self.assertListEqual(output_ids[0].tolist(), lowercase_ )
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''data2vec-text''' def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> str: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
'''simple docstring''' import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def __magic_name__ ( A ) -> tuple: return (data["data"], data["target"]) def __magic_name__ ( A , A ) -> XGBClassifier: snake_case = XGBClassifier() classifier.fit(A , A ) return classifier def __magic_name__ ( ) -> None: snake_case = load_iris() snake_case , snake_case = data_handling(A ) snake_case , snake_case , snake_case , snake_case = train_test_split( A , A , test_size=0.25 ) snake_case = iris['target_names'] # Create an XGBoost Classifier from the training data snake_case = xgboost(A , A ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( A , A , A , display_labels=A , cmap='Blues' , normalize='true' , ) plt.title('Normalized Confusion Matrix - IRIS Dataset' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
1
'''simple docstring''' from math import factorial lowerCAmelCase_ = {str(d): factorial(d) for d in range(1_0)} def __magic_name__ ( A ) -> int: return sum(DIGIT_FACTORIAL[d] for d in str(A ) ) def __magic_name__ ( ) -> int: snake_case = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , A ) if sum_of_digit_factorial(A ) == i ) if __name__ == "__main__": print(f"{solution() = }")
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCamelCase : snake_case_ = 42 snake_case_ = 42 class lowerCamelCase : def __init__( self, lowercase_ ) -> List[str]: snake_case = [[] for _ in range(lowercase_ )] snake_case = size def __getitem__( self, lowercase_ ) -> Iterator[Edge]: return iter(self._graph[vertex] ) @property def _lowerCamelCase ( self ) -> Union[str, Any]: return self._size def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Optional[int]: if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowercase_, lowercase_ ) ) def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int | None: snake_case = deque([start_vertex] ) snake_case = [None] * self.size snake_case = 0 while queue: snake_case = queue.popleft() snake_case = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: snake_case = current_distance + edge.weight snake_case = distances[edge.destination_vertex] if ( isinstance(lowercase_, lowercase_ ) and new_distance >= dest_vertex_distance ): continue snake_case = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): snake_case_ = IFInpaintingSuperResolutionPipeline snake_case_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) snake_case_ = PipelineTesterMixin.required_optional_params - {'''latents'''} def _lowerCamelCase ( self ) -> Any: return self._get_superresolution_dummy_components() def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> Optional[int]: if str(lowercase_ ).startswith('mps' ): snake_case = torch.manual_seed(lowercase_ ) else: snake_case = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case = floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case = floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case = floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def _lowerCamelCase ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _lowerCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def _lowerCamelCase ( self ) -> Tuple: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _lowerCamelCase ( self ) -> Union[str, Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _lowerCamelCase ( self ) -> Dict: self._test_save_load_local() def _lowerCamelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical( expected_max_diff=1E-2, )
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0 for i in range(n + 1 )] snake_case = 1 snake_case = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , A ): snake_case = 1 snake_case = 0 for i in range(A ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f"{solution() = }")
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCamelCase ( self ) -> str: import torch snake_case = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) snake_case = tokenizer(**self.metas )['input_ids'] # fmt: off snake_case = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCamelCase ( self ) -> Optional[int]: import torch snake_case = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) snake_case = tokenizer(**self.metas )['input_ids'] # fmt: off snake_case = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
332
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "nielsr/canine-s": 2_0_4_8, } # Unicode defines 1,114,112 total “codepoints” lowerCAmelCase_ = 1_1_1_4_1_1_2 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowerCAmelCase_ = 0 lowerCAmelCase_ = 0xE000 lowerCAmelCase_ = 0xE001 lowerCAmelCase_ = 0xE002 lowerCAmelCase_ = 0xE003 lowerCAmelCase_ = 0xE004 # Maps special codepoints to human-readable names. lowerCAmelCase_ = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, lowercase_=chr(lowercase_ ), lowercase_=chr(lowercase_ ), lowercase_=chr(lowercase_ ), lowercase_=chr(lowercase_ ), lowercase_=chr(lowercase_ ), lowercase_=chr(lowercase_ ), lowercase_=False, lowercase_=2048, **lowercase_, ) -> List[Any]: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else bos_token snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else eos_token snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else sep_token snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else cls_token snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else mask_token super().__init__( bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, model_max_length=lowercase_, **lowercase_, ) # Creates a mapping for looking up the IDs of special symbols. snake_case = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case = UNICODE_VOCAB_SIZE snake_case = len(self._special_codepoints ) @property def _lowerCamelCase ( self ) -> int: return self._unicode_vocab_size def _lowerCamelCase ( self, lowercase_ ) -> List[str]: return list(lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> int: try: return ord(lowercase_ ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def _lowerCamelCase ( self, lowercase_ ) -> str: try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(lowercase_ ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def _lowerCamelCase ( self, lowercase_ ) -> Tuple: return "".join(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] snake_case = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_, token_ids_a=lowercase_, already_has_special_tokens=lowercase_ ) snake_case = [1] + ([0] * len(lowercase_ )) + [1] if token_ids_a is not None: result += ([0] * len(lowercase_ )) + [1] return result def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] snake_case = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple: return ()
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } lowerCAmelCase_ = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def __magic_name__ ( A ) -> List[str]: snake_case = {} with open(A , 'r' ) as file: for line_number, line in enumerate(A ): snake_case = line.strip() if line: snake_case = line.split() snake_case = line_number snake_case = words[0] snake_case = value return result def __magic_name__ ( A , A , A , A , A ) -> Any: for attribute in key.split('.' ): snake_case = getattr(A , A ) snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case = 'param' if weight_type is not None and weight_type != "param": snake_case = getattr(A , A ).shape elif weight_type is not None and weight_type == "param": snake_case = hf_pointer for attribute in hf_param_name.split('.' ): snake_case = getattr(A , A ) snake_case = shape_pointer.shape # let's reduce dimension snake_case = value[0] else: snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case = value elif weight_type == "weight_g": snake_case = value elif weight_type == "weight_v": snake_case = value elif weight_type == "bias": snake_case = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): snake_case = getattr(A , A ) snake_case = value else: snake_case = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __magic_name__ ( A , A , A , A , A ) -> int: snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): snake_case = PARAM_MAPPING[full_name.split('.' )[-1]] snake_case = 'param' if weight_type is not None and weight_type != "param": snake_case = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case = '.'.join([key, hf_param_name] ) else: snake_case = key snake_case = value if 'lm_head' in full_key else value[0] lowerCAmelCase_ = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def __magic_name__ ( A , A , A=None , A=None ) -> List[Any]: snake_case = False for key, mapped_key in MAPPING.items(): snake_case = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case = True if "*" in mapped_key: snake_case = name.split(A )[0].split('.' )[-2] snake_case = mapped_key.replace('*' , A ) if "weight_g" in name: snake_case = 'weight_g' elif "weight_v" in name: snake_case = 'weight_v' elif "bias" in name: snake_case = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case = 'weight' else: snake_case = None if hf_dict is not None: rename_dict(A , A , A , A , A ) else: set_recursively(A , A , A , A , A ) return is_used return is_used def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = [] snake_case = fairseq_model.state_dict() snake_case = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) snake_case = True else: snake_case = load_wavaveca_layer(A , A , A ) if not is_used: unused_weights.append(A ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __magic_name__ ( A , A , A , A , A ) -> Tuple: snake_case = full_name.split('conv_layers.' )[-1] snake_case = name.split('.' ) snake_case = int(items[0] ) snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) @torch.no_grad() def __magic_name__ ( A , A , A=None , A=None , A=True , A=False ) -> Tuple: if config_path is not None: snake_case = WavaVecaConfig.from_pretrained(A ) else: snake_case = WavaVecaConfig() if is_seq_class: snake_case = read_txt_into_dict(A ) snake_case = idalabel snake_case = WavaVecaForSequenceClassification(A ) snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) feature_extractor.save_pretrained(A ) elif is_finetuned: if dict_path: snake_case = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case = target_dict.pad_index snake_case = target_dict.bos_index snake_case = target_dict.eos_index snake_case = len(target_dict.symbols ) snake_case = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) snake_case = target_dict.indices # fairseq has the <pad> and <s> switched snake_case = 0 snake_case = 1 with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A , A ) snake_case = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) snake_case = True if config.feat_extract_norm == 'layer' else False snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) snake_case = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) snake_case = WavaVecaForCTC(A ) else: snake_case = WavaVecaForPreTraining(A ) if is_finetuned or is_seq_class: snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case = argparse.Namespace(task='audio_pretraining' ) snake_case = fairseq.tasks.setup_task(A ) snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A ) snake_case = model[0].eval() recursively_load_weights(A , A , not is_finetuned ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
'''simple docstring''' lowerCAmelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}] lowerCAmelCase_ = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
'''simple docstring''' def __magic_name__ ( A = 3 , A = 7 , A = 1_0_0_0_0_0_0 ) -> int: snake_case = 0 snake_case = 1 for current_denominator in range(1 , limit + 1 ): snake_case = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: snake_case = current_numerator snake_case = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = ['''pixel_values'''] def __init__( self, lowercase_ = True, lowercase_ = None, lowercase_ = PIL.Image.BICUBIC, lowercase_ = True, lowercase_ = None, lowercase_ = 1 / 255, lowercase_ = True, lowercase_ = True, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> None: super().__init__(**lowercase_ ) snake_case = size if size is not None else {'height': 256, 'width': 256} snake_case = get_size_dict(lowercase_ ) snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case = get_size_dict(lowercase_, param_name='crop_size' ) snake_case = do_resize snake_case = size snake_case = resample snake_case = do_center_crop snake_case = crop_size snake_case = do_rescale snake_case = rescale_factor snake_case = do_normalize snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = PIL.Image.BICUBIC, lowercase_ = None, **lowercase_, ) -> np.ndarray: snake_case = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return resize( lowercase_, size=(size['height'], size['width']), resample=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray: snake_case = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' ) return center_crop(lowercase_, size=(size['height'], size['width']), data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> int: return rescale(lowercase_, scale=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray: return normalize(lowercase_, mean=lowercase_, std=lowercase_, data_format=lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_=None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = ChannelDimension.FIRST, **lowercase_, ) -> PIL.Image.Image: snake_case = do_resize if do_resize is not None else self.do_resize snake_case = resample if resample is not None else self.resample snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case = do_rescale if do_rescale is not None else self.do_rescale snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case = do_normalize if do_normalize is not None else self.do_normalize snake_case = image_mean if image_mean is not None else self.image_mean snake_case = image_std if image_std is not None else self.image_std snake_case = size if size is not None else self.size snake_case = get_size_dict(lowercase_ ) snake_case = crop_size if crop_size is not None else self.crop_size snake_case = get_size_dict(lowercase_, param_name='crop_size' ) snake_case = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case = [to_numpy_array(lowercase_ ) for image in images] if do_resize: snake_case = [self.resize(image=lowercase_, size=lowercase_, resample=lowercase_ ) for image in images] if do_center_crop: snake_case = [self.center_crop(image=lowercase_, size=lowercase_ ) for image in images] if do_rescale: snake_case = [self.rescale(image=lowercase_, scale=lowercase_ ) for image in images] if do_normalize: snake_case = [self.normalize(image=lowercase_, mean=lowercase_, std=lowercase_ ) for image in images] snake_case = [to_channel_dimension_format(lowercase_, lowercase_ ) for image in images] snake_case = {'pixel_values': images} return BatchFeature(data=lowercase_, tensor_type=lowercase_ )
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
'''simple docstring''' import math import random def __magic_name__ ( A , A = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value lowerCAmelCase_ = 0.02 def __magic_name__ ( A , A ) -> float: snake_case = float(2 * (random.randint(1 , 1_0_0 )) - 1 ) for _ in range(A ): # Forward propagation snake_case = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? snake_case = (expected / 1_0_0) - layer_a # Error delta snake_case = layer_1_error * sigmoid_function(A , A ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_0_0 if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ = int(input("Expected value: ")) lowerCAmelCase_ = int(input("Number of propagations: ")) print(forward_propagation(expected, number_propagations))
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" lowerCAmelCase_ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" lowerCAmelCase_ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): def _lowerCamelCase ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), reference_urls=[ 'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html' ], ) def _lowerCamelCase ( self ) -> Dict: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('float' ) ), "references": datasets.Sequence(datasets.Value('float' ) ), } else: return { "predictions": datasets.Value('float' ), "references": datasets.Value('float' ), } def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_=None, lowercase_="uniform_average", lowercase_=True ) -> List[Any]: snake_case = mean_squared_error( lowercase_, lowercase_, sample_weight=lowercase_, multioutput=lowercase_, squared=lowercase_ ) return {"mse": mse}
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''ibert''' def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=False, lowercase_="none", **lowercase_, ) -> str: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = quant_mode snake_case = force_dequant class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring''' from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__) class lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ): snake_case_ = None snake_case_ = None class lowerCamelCase ( folder_based_builder.FolderBasedBuilder ): snake_case_ = datasets.Audio() snake_case_ = '''audio''' snake_case_ = AudioFolderConfig snake_case_ = 42 # definition at the bottom of the script snake_case_ = AudioClassification(audio_column='''audio''' , label_column='''label''' ) lowerCAmelCase_ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] lowerCAmelCase_ = AUDIO_EXTENSIONS
332
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
1
'''simple docstring''' from typing import Dict from .base import GenericTensor, Pipeline class lowerCamelCase ( __lowerCAmelCase ): def _lowerCamelCase ( self, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_ ) -> List[str]: if tokenize_kwargs is None: snake_case = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) snake_case = truncation snake_case = tokenize_kwargs snake_case = {} if return_tensors is not None: snake_case = return_tensors return preprocess_params, {}, postprocess_params def _lowerCamelCase ( self, lowercase_, **lowercase_ ) -> Dict[str, GenericTensor]: snake_case = self.framework snake_case = self.tokenizer(lowercase_, return_tensors=lowercase_, **lowercase_ ) return model_inputs def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = self.model(**lowercase_ ) return model_outputs def _lowerCamelCase ( self, lowercase_, lowercase_=False ) -> Tuple: # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self, *lowercase_, **lowercase_ ) -> List[Any]: return super().__call__(*lowercase_, **lowercase_ )
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring''' from manim import * class lowerCamelCase ( __lowerCAmelCase ): def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = Rectangle(height=0.5, width=0.5 ) snake_case = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) snake_case = [mem.copy() for i in range(6 )] snake_case = [mem.copy() for i in range(6 )] snake_case = VGroup(*lowercase_ ).arrange(lowercase_, buff=0 ) snake_case = VGroup(*lowercase_ ).arrange(lowercase_, buff=0 ) snake_case = VGroup(lowercase_, lowercase_ ).arrange(lowercase_, buff=0 ) snake_case = Text('CPU', font_size=24 ) snake_case = Group(lowercase_, lowercase_ ).arrange(lowercase_, buff=0.5, aligned_edge=lowercase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase_ ) snake_case = [mem.copy() for i in range(1 )] snake_case = VGroup(*lowercase_ ).arrange(lowercase_, buff=0 ) snake_case = Text('GPU', font_size=24 ) snake_case = Group(lowercase_, lowercase_ ).arrange(lowercase_, buff=0.5, aligned_edge=lowercase_ ) gpu.align_to(lowercase_, lowercase_ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowercase_ ) snake_case = [mem.copy() for i in range(6 )] snake_case = VGroup(*lowercase_ ).arrange(lowercase_, buff=0 ) snake_case = Text('Model', font_size=24 ) snake_case = Group(lowercase_, lowercase_ ).arrange(lowercase_, buff=0.5, aligned_edge=lowercase_ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowercase_, run_time=1 ), Create(lowercase_, run_time=1 ), Create(lowercase_, run_time=1 ), ) snake_case = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, ) snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase_, run_time=2.5 ), Write(lowercase_ ), Write(lowercase_ ) ) self.add(lowercase_ ) snake_case = [] snake_case = [] snake_case = [] for i, rect in enumerate(lowercase_ ): snake_case = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase_, opacity=0.7 ) cpu_target.move_to(lowercase_ ) cpu_target.generate_target() snake_case = 0.46 / 4 snake_case = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowercase_ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target, direction=lowercase_, buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target, direction=lowercase_, buff=0.0 ) cpu_targs.append(lowercase_ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowercase_ ) ) second_animations.append(MoveToTarget(lowercase_, run_time=1.5 ) ) self.play(*lowercase_ ) self.play(*lowercase_ ) self.wait()
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring''' from abc import ABC, abstractmethod from argparse import ArgumentParser class lowerCamelCase ( __lowerCAmelCase ): @staticmethod @abstractmethod def _lowerCamelCase ( lowercase_ ) -> Tuple: raise NotImplementedError() @abstractmethod def _lowerCamelCase ( self ) -> Optional[Any]: raise NotImplementedError()
332
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''note_seq'''] def __init__( self, *lowercase_, **lowercase_ ) -> str: requires_backends(self, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: requires_backends(cls, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCAmelCase_ = transforms.Compose( [ transforms.Resize((2_5_6, 2_5_6)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __magic_name__ ( A ) -> Tuple: if isinstance(A , torch.Tensor ): return image elif isinstance(A , PIL.Image.Image ): snake_case = [image] snake_case = [trans(img.convert('RGB' ) ) for img in image] snake_case = torch.stack(A ) return image class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, lowercase_, lowercase_ ) -> Optional[int]: super().__init__() # make sure scheduler can always be converted to DDIM snake_case = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowercase_, scheduler=lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> Dict: if strength < 0 or strength > 1: raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ ) -> Any: # get the original timestep using init_timestep snake_case = min(int(num_inference_steps * strength ), lowercase_ ) snake_case = max(num_inference_steps - init_timestep, 0 ) snake_case = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_=None ) -> Union[str, Any]: if not isinstance(lowercase_, (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' ) snake_case = image.to(device=lowercase_, dtype=lowercase_ ) if isinstance(lowercase_, lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) snake_case = init_latents.shape snake_case = randn_tensor(lowercase_, generator=lowercase_, device=lowercase_, dtype=lowercase_ ) # get latents print('add noise to latents at timestep', lowercase_ ) snake_case = self.scheduler.add_noise(lowercase_, lowercase_, lowercase_ ) snake_case = init_latents return latents @torch.no_grad() def __call__( self, lowercase_ = None, lowercase_ = 0.8, lowercase_ = 1, lowercase_ = None, lowercase_ = 0.0, lowercase_ = 50, lowercase_ = None, lowercase_ = "pil", lowercase_ = True, ) -> Union[ImagePipelineOutput, Tuple]: self.check_inputs(lowercase_ ) # 2. Preprocess image snake_case = preprocess(lowercase_ ) # 3. set timesteps self.scheduler.set_timesteps(lowercase_, device=self.device ) snake_case , snake_case = self.get_timesteps(lowercase_, lowercase_, self.device ) snake_case = timesteps[:1].repeat(lowercase_ ) # 4. Prepare latent variables snake_case = self.prepare_latents(lowercase_, lowercase_, lowercase_, self.unet.dtype, self.device, lowercase_ ) snake_case = latents # 5. Denoising loop for t in self.progress_bar(lowercase_ ): # 1. predict noise model_output snake_case = self.unet(lowercase_, lowercase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 snake_case = self.scheduler.step( lowercase_, lowercase_, lowercase_, eta=lowercase_, use_clipped_model_output=lowercase_, generator=lowercase_, ).prev_sample snake_case = (image / 2 + 0.5).clamp(0, 1 ) snake_case = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": snake_case = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowercase_ )
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
'''simple docstring''' from collections import defaultdict def __magic_name__ ( A , A ) -> bool: snake_case = first_str.lower().strip() snake_case = second_str.lower().strip() # Remove whitespace snake_case = first_str.replace(' ' , '' ) snake_case = second_str.replace(' ' , '' ) # Strings of different lengths are not anagrams if len(A ) != len(A ): return False # Default values for count should be 0 snake_case = defaultdict(A ) # For each character in input strings, # increment count in the corresponding for i in range(len(A ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() lowerCAmelCase_ = input("Enter the first string ").strip() lowerCAmelCase_ = input("Enter the second string ").strip() lowerCAmelCase_ = check_anagrams(input_a, input_b) print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCamelCase : def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=False, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=33, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> Dict: snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_mask snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = scope def _lowerCamelCase ( self ) -> Optional[int]: snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) snake_case = None if self.use_input_mask: snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size], self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) snake_case = ids_tensor([self.batch_size], self.num_choices ) snake_case = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self ) -> Optional[int]: return EsmConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]: snake_case = EsmModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, attention_mask=lowercase_ ) snake_case = model(lowercase_ ) snake_case = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str: snake_case = EsmForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]: snake_case = self.num_labels snake_case = EsmForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case = model(lowercase_, attention_mask=lowercase_, labels=lowercase_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) = config_and_inputs snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): snake_case_ = False snake_case_ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case_ = () snake_case_ = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True def _lowerCamelCase ( self ) -> str: snake_case = EsmModelTester(self ) snake_case = ConfigTester(self, config_class=lowercase_, hidden_size=37 ) def _lowerCamelCase ( self ) -> str: self.config_tester.run_common_tests() def _lowerCamelCase ( self ) -> List[str]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case = type self.model_tester.create_and_check_model(*lowercase_ ) def _lowerCamelCase ( self ) -> int: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @slow def _lowerCamelCase ( self ) -> Dict: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = EsmModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs()[0] snake_case = EsmEmbeddings(config=lowercase_ ) snake_case = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) snake_case = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) snake_case = create_position_ids_from_input_ids(lowercase_, model.padding_idx ) self.assertEqual(position_ids.shape, expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowercase_, lowercase_ ) ) ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.model_tester.prepare_config_and_inputs()[0] snake_case = EsmEmbeddings(config=lowercase_ ) snake_case = torch.empty(2, 4, 30 ) snake_case = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] snake_case = torch.as_tensor([expected_single_positions, expected_single_positions] ) snake_case = embeddings.create_position_ids_from_inputs_embeds(lowercase_ ) self.assertEqual(position_ids.shape, expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowercase_, lowercase_ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _lowerCamelCase ( self ) -> Tuple: pass @unittest.skip('Esm does not support embedding resizing' ) def _lowerCamelCase ( self ) -> Union[str, Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _lowerCamelCase ( self ) -> Any: pass @require_torch class lowerCamelCase ( __lowerCAmelCase ): @slow def _lowerCamelCase ( self ) -> str: with torch.no_grad(): snake_case = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] ) snake_case = model(lowercase_ )[0] snake_case = 33 snake_case = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape, lowercase_ ) snake_case = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) ) @slow def _lowerCamelCase ( self ) -> Optional[int]: with torch.no_grad(): snake_case = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) snake_case = model(lowercase_ )[0] # compare the actual values for a slice. snake_case = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
1
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A , A , A ) -> Tuple: snake_case = WavaVecaForSequenceClassification.from_pretrained(A , config=A ) snake_case = downstream_dict['projector.weight'] snake_case = downstream_dict['projector.bias'] snake_case = downstream_dict['model.post_net.linear.weight'] snake_case = downstream_dict['model.post_net.linear.bias'] return model def __magic_name__ ( A , A , A ) -> str: snake_case = WavaVecaForAudioFrameClassification.from_pretrained(A , config=A ) snake_case = downstream_dict['model.linear.weight'] snake_case = downstream_dict['model.linear.bias'] return model def __magic_name__ ( A , A , A ) -> int: snake_case = WavaVecaForXVector.from_pretrained(A , config=A ) snake_case = downstream_dict['connector.weight'] snake_case = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] snake_case = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] snake_case = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] snake_case = downstream_dict['objective.W'] return model @torch.no_grad() def __magic_name__ ( A , A , A , A ) -> Optional[int]: snake_case = torch.load(A , map_location='cpu' ) snake_case = checkpoint['Downstream'] snake_case = WavaVecaConfig.from_pretrained(A ) snake_case = WavaVecaFeatureExtractor.from_pretrained( A , return_attention_mask=A , do_normalize=A ) snake_case = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): snake_case = convert_classification(A , A , A ) elif arch.endswith('ForAudioFrameClassification' ): snake_case = convert_diarization(A , A , A ) elif arch.endswith('ForXVector' ): snake_case = convert_xvector(A , A , A ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: snake_case = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(A ) hf_model.save_pretrained(A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") lowerCAmelCase_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring''' def __magic_name__ ( A ) -> list[int]: snake_case = len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: snake_case , snake_case = numbers[j], numbers[i] return numbers if __name__ == "__main__": lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip() lowerCAmelCase_ = [int(item) for item in user_input.split(",")] print(exchange_sort(unsorted))
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
'''simple docstring''' from __future__ import annotations class lowerCamelCase : def __init__( self, lowercase_ = 0 ) -> Optional[int]: snake_case = key def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> list[str]: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) snake_case = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_ ) ^ key ) for ch in content] def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> list[str]: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) snake_case = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_ ) ^ key ) for ch in content] def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> str: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) snake_case = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned snake_case = '' for ch in content: ans += chr(ord(lowercase_ ) ^ key ) return ans def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> str: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) snake_case = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned snake_case = '' for ch in content: ans += chr(ord(lowercase_ ) ^ key ) return ans def _lowerCamelCase ( self, lowercase_, lowercase_ = 0 ) -> bool: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) try: with open(lowercase_ ) as fin, open('encrypt.out', 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_, lowercase_ ) ) except OSError: return False return True def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> bool: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) try: with open(lowercase_ ) as fin, open('decrypt.out', 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_, lowercase_ ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split() snake_case = dict(zip(lowercase_, range(len(lowercase_ ) ) ) ) snake_case = { 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>', } snake_case = { 'feature_size': 1, 'padding_value': 0.0, 'sampling_rate': 16000, 'return_attention_mask': False, 'do_normalize': True, } snake_case = tempfile.mkdtemp() snake_case = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) snake_case = os.path.join(self.tmpdirname, lowercase_ ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) with open(self.feature_extraction_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) # load decoder from hub snake_case = 'hf-internal-testing/ngram-beam-search-decoder' def _lowerCamelCase ( self, **lowercase_ ) -> List[Any]: snake_case = self.add_kwargs_tokens_map.copy() kwargs.update(lowercase_ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowercase_ ) def _lowerCamelCase ( self, **lowercase_ ) -> Optional[Any]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowercase_ ) def _lowerCamelCase ( self, **lowercase_ ) -> List[str]: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowercase_ ) def _lowerCamelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.get_tokenizer() snake_case = self.get_feature_extractor() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) processor.save_pretrained(self.tmpdirname ) snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowercase_ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor, lowercase_ ) # decoder self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, ) self.assertIsInstance(processor.decoder, lowercase_ ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match snake_case = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha, 5.0 ) self.assertEqual(processor.language_model.beta, 3.0 ) self.assertEqual(processor.language_model.score_boundary, -7.0 ) self.assertEqual(processor.language_model.unk_score_offset, 3 ) def _lowerCamelCase ( self ) -> int: snake_case = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['xx'] ) with self.assertRaisesRegex(lowercase_, 'include' ): WavaVecaProcessorWithLM( tokenizer=lowercase_, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() ) def _lowerCamelCase ( self ) -> int: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = floats_list((3, 1000) ) snake_case = feature_extractor(lowercase_, return_tensors='np' ) snake_case = processor(lowercase_, return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = 'This is a test string' snake_case = processor(text=lowercase_ ) snake_case = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCamelCase ( self, lowercase_=(2, 10, 16), lowercase_=77 ) -> List[str]: np.random.seed(lowercase_ ) return np.random.rand(*lowercase_ ) def _lowerCamelCase ( self ) -> Dict: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = self._get_dummy_logits(shape=(10, 16), seed=13 ) snake_case = processor.decode(lowercase_ ) snake_case = decoder.decode_beams(lowercase_ )[0] self.assertEqual(decoded_decoder[0], decoded_processor.text ) self.assertEqual('</s> <s> </s>', decoded_processor.text ) self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score ) @parameterized.expand([[None], ['fork'], ['spawn']] ) def _lowerCamelCase ( self, lowercase_ ) -> Tuple: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: snake_case = processor.batch_decode(lowercase_ ) else: with get_context(lowercase_ ).Pool() as pool: snake_case = processor.batch_decode(lowercase_, lowercase_ ) snake_case = list(lowercase_ ) with get_context('fork' ).Pool() as p: snake_case = decoder.decode_beams_batch(lowercase_, lowercase_ ) snake_case , snake_case , snake_case = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowercase_, decoded_processor.text ) self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'], decoded_processor.text ) self.assertListEqual(lowercase_, decoded_processor.logit_score ) self.assertListEqual(lowercase_, decoded_processor.lm_score ) def _lowerCamelCase ( self ) -> Tuple: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = self._get_dummy_logits() snake_case = 15 snake_case = -20.0 snake_case = -4.0 snake_case = processor.batch_decode( lowercase_, beam_width=lowercase_, beam_prune_logp=lowercase_, token_min_logp=lowercase_, ) snake_case = decoded_processor_out.text snake_case = list(lowercase_ ) with get_context('fork' ).Pool() as pool: snake_case = decoder.decode_beams_batch( lowercase_, lowercase_, beam_width=lowercase_, beam_prune_logp=lowercase_, token_min_logp=lowercase_, ) snake_case = [d[0][0] for d in decoded_decoder_out] snake_case = [d[0][2] for d in decoded_decoder_out] snake_case = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowercase_, lowercase_ ) self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'], lowercase_ ) self.assertTrue(np.array_equal(lowercase_, decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447], lowercase_, atol=1E-3 ) ) self.assertTrue(np.array_equal(lowercase_, decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474], lowercase_, atol=1E-3 ) ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) snake_case = self._get_dummy_logits() snake_case = 2.0 snake_case = 5.0 snake_case = -20.0 snake_case = True snake_case = processor.batch_decode( lowercase_, alpha=lowercase_, beta=lowercase_, unk_score_offset=lowercase_, lm_score_boundary=lowercase_, ) snake_case = decoded_processor_out.text snake_case = list(lowercase_ ) decoder.reset_params( alpha=lowercase_, beta=lowercase_, unk_score_offset=lowercase_, lm_score_boundary=lowercase_, ) with get_context('fork' ).Pool() as pool: snake_case = decoder.decode_beams_batch( lowercase_, lowercase_, ) snake_case = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowercase_, lowercase_ ) self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'], lowercase_ ) snake_case = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha, 2.0 ) self.assertEqual(lm_model.beta, 5.0 ) self.assertEqual(lm_model.unk_score_offset, -20.0 ) self.assertEqual(lm_model.score_boundary, lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) snake_case = processor.decoder.model_container[processor.decoder._model_key] snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute() snake_case = os.listdir(lowercase_ ) snake_case = ['alphabet.json', 'language_model'] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowercase_, lowercase_ ) def _lowerCamelCase ( self ) -> List[str]: snake_case = snapshot_download('hf-internal-testing/processor_with_lm' ) snake_case = WavaVecaProcessorWithLM.from_pretrained(lowercase_ ) snake_case = processor.decoder.model_container[processor.decoder._model_key] snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute() snake_case = os.listdir(lowercase_ ) snake_case = os.listdir(lowercase_ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowercase_, lowercase_ ) def _lowerCamelCase ( self ) -> Tuple: snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' ) snake_case = floats_list((3, 1000) ) snake_case = processor_wavaveca(lowercase_, return_tensors='np' ) snake_case = processor_auto(lowercase_, return_tensors='np' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 ) snake_case = self._get_dummy_logits() snake_case = processor_wavaveca.batch_decode(lowercase_ ) snake_case = processor_auto.batch_decode(lowercase_ ) self.assertListEqual(decoded_wavaveca.text, decoded_auto.text ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.get_feature_extractor() snake_case = self.get_tokenizer() snake_case = self.get_decoder() snake_case = WavaVecaProcessorWithLM(tokenizer=lowercase_, feature_extractor=lowercase_, decoder=lowercase_ ) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', ) @staticmethod def _lowerCamelCase ( lowercase_, lowercase_ ) -> Dict: snake_case = [d[key] for d in offsets] return retrieved_list def _lowerCamelCase ( self ) -> List[str]: snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) snake_case = self._get_dummy_logits()[0] snake_case = processor.decode(lowercase_, output_word_offsets=lowercase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('text' in outputs ) self.assertTrue('word_offsets' in outputs ) self.assertTrue(isinstance(lowercase_, lowercase_ ) ) self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'], 'word' ) ), outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'word' ), ['<s>', '<s>', '</s>'] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'start_offset' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'], 'end_offset' ), [1, 3, 5] ) def _lowerCamelCase ( self ) -> str: snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' ) snake_case = self._get_dummy_logits() snake_case = processor.batch_decode(lowercase_, output_word_offsets=lowercase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ), 4 ) self.assertTrue('text' in outputs ) self.assertTrue('word_offsets' in outputs ) self.assertTrue(isinstance(lowercase_, lowercase_ ) ) self.assertListEqual( [' '.join(self.get_from_offsets(lowercase_, 'word' ) ) for o in outputs['word_offsets']], outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'word' ), ['<s>', '<s>', '</s>'] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'start_offset' ), [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0], 'end_offset' ), [1, 3, 5] ) @slow @require_torch @require_torchaudio def _lowerCamelCase ( self ) -> int: import torch snake_case = load_dataset('common_voice', 'en', split='train', streaming=lowercase_ ) snake_case = ds.cast_column('audio', datasets.Audio(sampling_rate=16000 ) ) snake_case = iter(lowercase_ ) snake_case = next(lowercase_ ) snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' ) snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train snake_case = processor(sample['audio']['array'], return_tensors='pt' ).input_values with torch.no_grad(): snake_case = model(lowercase_ ).logits.cpu().numpy() snake_case = processor.decode(logits[0], output_word_offsets=lowercase_ ) snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate snake_case = [ { 'start_time': d['start_offset'] * time_offset, 'end_time': d['end_offset'] * time_offset, 'word': d['word'], } for d in output['word_offsets'] ] snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL' # output words self.assertEqual(' '.join(self.get_from_offsets(lowercase_, 'word' ) ), lowercase_ ) self.assertEqual(' '.join(self.get_from_offsets(lowercase_, 'word' ) ), output.text ) # output times snake_case = torch.tensor(self.get_from_offsets(lowercase_, 'start_time' ) ) snake_case = torch.tensor(self.get_from_offsets(lowercase_, 'end_time' ) ) # fmt: off snake_case = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) snake_case = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=0.01 ) ) self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=0.01 ) )
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A ) -> float: snake_case = sorted(numsa + numsa ) snake_case , snake_case = divmod(len(A ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ = [float(x) for x in input("Enter the elements of first array: ").split()] lowerCAmelCase_ = [float(x) for x in input("Enter the elements of second array: ").split()] print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
332
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
'''simple docstring''' from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image lowerCAmelCase_ = ["text", "image", "audio"] def __magic_name__ ( A ) -> Optional[int]: snake_case = [] for input_type in input_types: if input_type == "text": inputs.append('Text input' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(A , A ): inputs.append(create_inputs(A ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def __magic_name__ ( A ) -> int: snake_case = [] for output in outputs: if isinstance(A , (str, AgentText) ): output_types.append('text' ) elif isinstance(A , (Image.Image, AgentImage) ): output_types.append('image' ) elif isinstance(A , (torch.Tensor, AgentAudio) ): output_types.append('audio' ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class lowerCamelCase : def _lowerCamelCase ( self ) -> Any: self.assertTrue(hasattr(self.tool, 'inputs' ) ) self.assertTrue(hasattr(self.tool, 'outputs' ) ) snake_case = self.tool.inputs for _input in inputs: if isinstance(_input, lowercase_ ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) snake_case = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def _lowerCamelCase ( self ) -> List[str]: snake_case = create_inputs(self.tool.inputs ) snake_case = self.tool(*lowercase_ ) # There is a single output if len(self.tool.outputs ) == 1: snake_case = [outputs] self.assertListEqual(output_types(lowercase_ ), self.tool.outputs ) def _lowerCamelCase ( self ) -> Optional[int]: self.assertTrue(hasattr(self.tool, 'description' ) ) self.assertTrue(hasattr(self.tool, 'default_checkpoint' ) ) self.assertTrue(self.tool.description.startswith('This is a tool that' ) ) def _lowerCamelCase ( self ) -> Any: snake_case = create_inputs(self.tool.inputs ) snake_case = self.tool(*lowercase_ ) if not isinstance(lowercase_, lowercase_ ): snake_case = [outputs] self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) ) for output, output_type in zip(lowercase_, self.tool.outputs ): snake_case = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(lowercase_, lowercase_ ) ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = create_inputs(self.tool.inputs ) snake_case = [] for _input, input_type in zip(lowercase_, self.tool.inputs ): if isinstance(lowercase_, lowercase_ ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error snake_case = self.tool(*lowercase_ ) if not isinstance(lowercase_, lowercase_ ): snake_case = [outputs] self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) )
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
'''simple docstring''' from __future__ import annotations class lowerCamelCase : def __init__( self, lowercase_, lowercase_ ) -> str: snake_case , snake_case = text, pattern snake_case , snake_case = len(lowercase_ ), len(lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> int: for i in range(self.patLen - 1, -1, -1 ): if char == self.pattern[i]: return i return -1 def _lowerCamelCase ( self, lowercase_ ) -> int: for i in range(self.patLen - 1, -1, -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _lowerCamelCase ( self ) -> list[int]: # searches pattern in text and returns index positions snake_case = [] for i in range(self.textLen - self.patLen + 1 ): snake_case = self.mismatch_in_text(lowercase_ ) if mismatch_index == -1: positions.append(lowercase_ ) else: snake_case = self.match_in_pattern(self.text[mismatch_index] ) snake_case = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowerCAmelCase_ = "ABAABA" lowerCAmelCase_ = "AB" lowerCAmelCase_ = BoyerMooreSearch(text, pattern) lowerCAmelCase_ = bms.bad_character_heuristic() if len(positions) == 0: print("No match found") else: print("Pattern found in following positions: ") print(positions)
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowerCAmelCase ) class lowerCamelCase ( __lowerCAmelCase ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization snake_case_ = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case_ = Features({'''text''': Value('''string''' )} ) snake_case_ = Features({'''labels''': ClassLabel} ) snake_case_ = "text" snake_case_ = "labels" def _lowerCamelCase ( self, lowercase_ ) -> Tuple: if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column], lowercase_ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) snake_case = copy.deepcopy(self ) snake_case = self.label_schema.copy() snake_case = features[self.label_column] snake_case = label_schema return task_template @property def _lowerCamelCase ( self ) -> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class lowerCamelCase ( unittest.TestCase ): def __init__( self, lowercase_, lowercase_=7, lowercase_=3, lowercase_=10, lowercase_=18, lowercase_=30, lowercase_=400, lowercase_=True, lowercase_=None, lowercase_=True, lowercase_=[0.5, 0.5, 0.5], lowercase_=[0.5, 0.5, 0.5], lowercase_=None, ) -> List[Any]: snake_case = size if size is not None else {'shortest_edge': 18} snake_case = crop_size if crop_size is not None else {'height': 18, 'width': 18} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = num_frames snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = do_normalize snake_case = image_mean snake_case = image_std snake_case = crop_size def _lowerCamelCase ( self ) -> Any: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = VivitImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ) -> List[Any]: snake_case = VivitImageProcessingTester(self ) @property def _lowerCamelCase ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ) -> Any: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_, 'image_mean' ) ) self.assertTrue(hasattr(lowercase_, 'image_std' ) ) self.assertTrue(hasattr(lowercase_, 'do_normalize' ) ) self.assertTrue(hasattr(lowercase_, 'do_resize' ) ) self.assertTrue(hasattr(lowercase_, 'do_center_crop' ) ) self.assertTrue(hasattr(lowercase_, 'size' ) ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} ) def _lowerCamelCase ( self ) -> Optional[Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos snake_case = prepare_video_inputs(self.image_processor_tester, equal_resolution=lowercase_ ) for video in video_inputs: self.assertIsInstance(lowercase_, lowercase_ ) self.assertIsInstance(video[0], Image.Image ) # Test not batched input snake_case = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def _lowerCamelCase ( self ) -> int: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_video_inputs(self.image_processor_tester, equal_resolution=lowercase_, numpify=lowercase_ ) for video in video_inputs: self.assertIsInstance(lowercase_, lowercase_ ) self.assertIsInstance(video[0], np.ndarray ) # Test not batched input snake_case = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def _lowerCamelCase ( self ) -> str: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_video_inputs(self.image_processor_tester, equal_resolution=lowercase_, torchify=lowercase_ ) for video in video_inputs: self.assertIsInstance(lowercase_, lowercase_ ) self.assertIsInstance(video[0], torch.Tensor ) # Test not batched input snake_case = image_processing(video_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case = image_processing(lowercase_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
'''simple docstring''' import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCAmelCase_ = data_utils.TransfoXLTokenizer lowerCAmelCase_ = data_utils.TransfoXLCorpus lowerCAmelCase_ = data_utils lowerCAmelCase_ = data_utils def __magic_name__ ( A , A , A , A ) -> Union[str, Any]: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(A , 'rb' ) as fp: snake_case = pickle.load(A , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) snake_case = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) snake_case = corpus.vocab.__dict__ torch.save(A , A ) snake_case = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , A ) snake_case = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(A , A ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model snake_case = os.path.abspath(A ) snake_case = os.path.abspath(A ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": snake_case = TransfoXLConfig() else: snake_case = TransfoXLConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = TransfoXLLMHeadModel(A ) snake_case = load_tf_weights_in_transfo_xl(A , A , A ) # Save pytorch-model snake_case = os.path.join(A , A ) snake_case = os.path.join(A , A ) print(F'''Save PyTorch model to {os.path.abspath(A )}''' ) torch.save(model.state_dict() , A ) print(F'''Save configuration file to {os.path.abspath(A )}''' ) with open(A , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) lowerCAmelCase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = ['''image_processor''', '''tokenizer'''] snake_case_ = '''LayoutLMv3ImageProcessor''' snake_case_ = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self, lowercase_=None, lowercase_=None, **lowercase_ ) -> Optional[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowercase_, ) snake_case = kwargs.pop('feature_extractor' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_, lowercase_ ) def __call__( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = True, lowercase_ = False, lowercase_ = None, lowercase_ = None, lowercase_ = 0, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = False, lowercase_ = False, lowercase_ = False, lowercase_ = False, lowercase_ = True, lowercase_ = None, **lowercase_, ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) # first, apply the image processor snake_case = self.image_processor(images=lowercase_, return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_, lowercase_ ): snake_case = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case = features['words'] snake_case = self.tokenizer( text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=lowercase_, add_special_tokens=lowercase_, padding=lowercase_, truncation=lowercase_, max_length=lowercase_, stride=lowercase_, pad_to_multiple_of=lowercase_, return_token_type_ids=lowercase_, return_attention_mask=lowercase_, return_overflowing_tokens=lowercase_, return_special_tokens_mask=lowercase_, return_offsets_mapping=lowercase_, return_length=lowercase_, verbose=lowercase_, return_tensors=lowercase_, **lowercase_, ) # add pixel values snake_case = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case = self.get_overflowing_images(lowercase_, encoded_inputs['overflow_to_sample_mapping'] ) snake_case = images return encoded_inputs def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(lowercase_ )} and {len(lowercase_ )}''' ) return images_with_overflow def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Any: return self.tokenizer.batch_decode(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> List[str]: return self.tokenizer.decode(*lowercase_, **lowercase_ ) @property def _lowerCamelCase ( self ) -> Dict: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _lowerCamelCase ( self ) -> Optional[int]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowercase_, ) return self.image_processor_class @property def _lowerCamelCase ( self ) -> Optional[int]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowercase_, ) return self.image_processor
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring''' import argparse import copy def __magic_name__ ( A ) -> Any: snake_case = {} with open(A ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[1], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[0], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __magic_name__ ( A , A ) -> Union[str, Any]: with open(A ) as f: snake_case = f.read(1 ) snake_case = start_node snake_case = [] snake_case = start_node snake_case = 0 while visiting not in first_solution: snake_case = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(A ) and k[0] not in first_solution: snake_case = k[1] snake_case = k[0] first_solution.append(A ) snake_case = distance_of_first_solution + int(A ) snake_case = best_node first_solution.append(A ) snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def __magic_name__ ( A , A ) -> Any: snake_case = [] for n in solution[1:-1]: snake_case = solution.index(A ) for kn in solution[1:-1]: snake_case = solution.index(A ) if n == kn: continue snake_case = copy.deepcopy(A ) snake_case = kn snake_case = n snake_case = 0 for k in _tmp[:-1]: snake_case = _tmp[_tmp.index(A ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case = distance + int(i[1] ) _tmp.append(A ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda A : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __magic_name__ ( A , A , A , A , A ) -> str: snake_case = 1 snake_case = first_solution snake_case = [] snake_case = distance_of_first_solution snake_case = solution while count <= iters: snake_case = find_neighborhood(A , A ) snake_case = 0 snake_case = neighborhood[index_of_best_solution] snake_case = len(A ) - 1 snake_case = False while not found: snake_case = 0 while i < len(A ): if best_solution[i] != solution[i]: snake_case = best_solution[i] snake_case = solution[i] break snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case = True snake_case = best_solution[:-1] snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case = cost snake_case = solution else: snake_case = index_of_best_solution + 1 snake_case = neighborhood[index_of_best_solution] if len(A ) >= size: tabu_list.pop(0 ) snake_case = count + 1 return best_solution_ever, best_cost def __magic_name__ ( A=None ) -> int: snake_case = generate_neighbours(args.File ) snake_case , snake_case = generate_first_solution( args.File , A ) snake_case , snake_case = tabu_search( A , A , A , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
332
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
1
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar("KT") lowerCAmelCase_ = TypeVar("VT") class lowerCamelCase ( Generic[KT, VT] ): def __init__( self, lowercase_ = "root", lowercase_ = None ) -> Optional[int]: snake_case = key snake_case = value snake_case = [] def __repr__( self ) -> str: return F'''Node({self.key}: {self.value})''' @property def _lowerCamelCase ( self ) -> int: return len(self.forward ) class lowerCamelCase ( Generic[KT, VT] ): def __init__( self, lowercase_ = 0.5, lowercase_ = 16 ) -> Any: snake_case = Node[KT, VT]() snake_case = 0 snake_case = p snake_case = max_level def __str__( self ) -> str: snake_case = list(self ) if len(lowercase_ ) == 0: return F'''SkipList(level={self.level})''' snake_case = max((len(str(lowercase_ ) ) for item in items), default=4 ) snake_case = max(lowercase_, 4 ) + 4 snake_case = self.head snake_case = [] snake_case = node.forward.copy() lines.append(F'''[{node.key}]'''.ljust(lowercase_, '-' ) + '* ' * len(lowercase_ ) ) lines.append(' ' * label_size + '| ' * len(lowercase_ ) ) while len(node.forward ) != 0: snake_case = node.forward[0] lines.append( F'''[{node.key}]'''.ljust(lowercase_, '-' ) + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) ) lines.append(' ' * label_size + '| ' * len(lowercase_ ) ) snake_case = node.forward lines.append('None'.ljust(lowercase_ ) + '* ' * len(lowercase_ ) ) return F'''SkipList(level={self.level})\n''' + "\n".join(lowercase_ ) def __iter__( self ) -> Optional[int]: snake_case = self.head while len(node.forward ) != 0: yield node.forward[0].key snake_case = node.forward[0] def _lowerCamelCase ( self ) -> int: snake_case = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCamelCase ( self, lowercase_ ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]: snake_case = [] snake_case = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: snake_case = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(lowercase_ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCamelCase ( self, lowercase_ ) -> Optional[int]: snake_case , snake_case = self._locate_node(lowercase_ ) if node is not None: for i, update_node in enumerate(lowercase_ ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: snake_case = node.forward[i] else: snake_case = update_node.forward[:i] def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Union[str, Any]: snake_case , snake_case = self._locate_node(lowercase_ ) if node is not None: snake_case = value else: snake_case = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1, lowercase_ ): update_vector.append(self.head ) snake_case = level snake_case = Node(lowercase_, lowercase_ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(lowercase_ ) else: snake_case = new_node def _lowerCamelCase ( self, lowercase_ ) -> VT | None: snake_case , snake_case = self._locate_node(lowercase_ ) if node is not None: return node.value return None def __magic_name__ ( ) -> str: snake_case = SkipList() skip_list.insert('Key1' , 3 ) skip_list.insert('Key2' , 1_2 ) skip_list.insert('Key3' , 4_1 ) skip_list.insert('Key4' , -1_9 ) snake_case = skip_list.head snake_case = {} while node.level != 0: snake_case = node.forward[0] snake_case = node.value assert len(A ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def __magic_name__ ( ) -> Optional[int]: snake_case = SkipList() skip_list.insert('Key1' , 1_0 ) skip_list.insert('Key1' , 1_2 ) skip_list.insert('Key5' , 7 ) skip_list.insert('Key7' , 1_0 ) skip_list.insert('Key10' , 5 ) skip_list.insert('Key7' , 7 ) skip_list.insert('Key5' , 5 ) skip_list.insert('Key10' , 1_0 ) snake_case = skip_list.head snake_case = {} while node.level != 0: snake_case = node.forward[0] snake_case = node.value if len(A ) != 4: print() assert len(A ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def __magic_name__ ( ) -> Dict: snake_case = SkipList() assert skip_list.find('Some key' ) is None def __magic_name__ ( ) -> int: snake_case = SkipList() skip_list.insert('Key2' , 2_0 ) assert skip_list.find('Key2' ) == 2_0 skip_list.insert('Some Key' , 1_0 ) skip_list.insert('Key2' , 8 ) skip_list.insert('V' , 1_3 ) assert skip_list.find('Y' ) is None assert skip_list.find('Key2' ) == 8 assert skip_list.find('Some Key' ) == 1_0 assert skip_list.find('V' ) == 1_3 def __magic_name__ ( ) -> Dict: snake_case = SkipList() skip_list.delete('Some key' ) assert len(skip_list.head.forward ) == 0 def __magic_name__ ( ) -> List[str]: snake_case = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('Key2' ) is None def __magic_name__ ( ) -> Optional[Any]: snake_case = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('V' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) == 1_4 assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('X' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) == 1_2 assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key1' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) == 1_5 skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) is None def __magic_name__ ( ) -> Optional[Any]: snake_case = SkipList() skip_list.insert('Key1' , 1_2 ) skip_list.insert('V' , 1_3 ) skip_list.insert('X' , 1_4_2 ) skip_list.insert('Key2' , 1_5 ) skip_list.delete('X' ) def traverse_keys(A ): yield node.key for forward_node in node.forward: yield from traverse_keys(A ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __magic_name__ ( ) -> Optional[Any]: def is_sorted(A ): return all(next_item >= item for item, next_item in zip(A , lst[1:] ) ) snake_case = SkipList() for i in range(1_0 ): skip_list.insert(A , A ) assert is_sorted(list(A ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(A ) ) skip_list.insert(-1_2 , -1_2 ) skip_list.insert(7_7 , 7_7 ) assert is_sorted(list(A ) ) def __magic_name__ ( ) -> Dict: for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __magic_name__ ( ) -> Dict: snake_case = SkipList() skip_list.insert(2 , '2' ) skip_list.insert(4 , '4' ) skip_list.insert(6 , '4' ) skip_list.insert(4 , '5' ) skip_list.insert(8 , '4' ) skip_list.insert(9 , '4' ) skip_list.delete(4 ) print(A ) if __name__ == "__main__": import doctest doctest.testmod() main()
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring''' import operator def __magic_name__ ( A , A = False , A = None ) -> list: snake_case = operator.lt if reverse else operator.gt snake_case = solution or [] if not arr: return solution snake_case = [arr.pop(0 )] for i, item in enumerate(A ): if _operator(A , sublist[-1] ): sublist.append(A ) arr.pop(A ) # merging sublist into solution list if not solution: solution.extend(A ) else: while sublist: snake_case = sublist.pop(0 ) for i, xx in enumerate(A ): if not _operator(A , A ): solution.insert(A , A ) break else: solution.append(A ) strand_sort(A , A , A ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def __magic_name__ ( A ) -> float: return np.dot(A , A ) class lowerCamelCase : def __init__( self, *, lowercase_ = np.inf, lowercase_ = "linear", lowercase_ = 0.0, ) -> None: snake_case = regularization snake_case = gamma if kernel == "linear": snake_case = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma' ) if not isinstance(self.gamma, (float, int) ): raise ValueError('gamma must be float or int' ) if not self.gamma > 0: raise ValueError('gamma must be > 0' ) snake_case = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: snake_case = F'''Unknown kernel: {kernel}''' raise ValueError(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> float: return np.dot(lowercase_, lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> float: return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> None: snake_case = observations snake_case = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((snake_case) , ) = np.shape(lowercase_ ) def to_minimize(lowercase_ ) -> float: snake_case = 0 ((snake_case) , ) = np.shape(lowercase_ ) for i in range(lowercase_ ): for j in range(lowercase_ ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i], observations[j] ) ) return 1 / 2 * s - sum(lowercase_ ) snake_case = LinearConstraint(lowercase_, 0, 0 ) snake_case = Bounds(0, self.regularization ) snake_case = minimize( lowercase_, np.ones(lowercase_ ), bounds=lowercase_, constraints=[ly_contraint] ).x snake_case = l_star # calculating mean offset of separation plane to points snake_case = 0 for i in range(lowercase_ ): for j in range(lowercase_ ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i], observations[j] ) snake_case = s / n def _lowerCamelCase ( self, lowercase_ ) -> int: snake_case = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], lowercase_ ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''microsoft/speecht5_tts''' snake_case_ = ( '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ''' '''text to read (in English) and returns a waveform object containing the sound.''' ) snake_case_ = '''text_reader''' snake_case_ = SpeechTaProcessor snake_case_ = SpeechTaForTextToSpeech snake_case_ = SpeechTaHifiGan snake_case_ = ['''text'''] snake_case_ = ['''audio'''] def _lowerCamelCase ( self ) -> List[Any]: if self.post_processor is None: snake_case = 'microsoft/speecht5_hifigan' super().setup() def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = self.pre_processor(text=lowercase_, return_tensors='pt', truncation=lowercase_ ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' ) snake_case = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation' ) snake_case = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowerCamelCase ( self, lowercase_ ) -> Any: with torch.no_grad(): return self.model.generate_speech(**lowercase_ ) def _lowerCamelCase ( self, lowercase_ ) -> Tuple: with torch.no_grad(): return self.post_processor(lowercase_ ).cpu().detach()
332
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''note_seq'''] def __init__( self, *lowercase_, **lowercase_ ) -> str: requires_backends(self, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: requires_backends(cls, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = ShapEPipeline snake_case_ = ['''prompt'''] snake_case_ = ['''prompt'''] snake_case_ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def _lowerCamelCase ( self ) -> List[str]: return 32 @property def _lowerCamelCase ( self ) -> List[str]: return 32 @property def _lowerCamelCase ( self ) -> int: return self.time_input_dim * 4 @property def _lowerCamelCase ( self ) -> List[str]: return 8 @property def _lowerCamelCase ( self ) -> Dict: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(lowercase_ ) @property def _lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) snake_case = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } snake_case = PriorTransformer(**lowercase_ ) return model @property def _lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) snake_case = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } snake_case = ShapERenderer(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Any: snake_case = self.dummy_prior snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_renderer snake_case = HeunDiscreteScheduler( beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=lowercase_, clip_sample=lowercase_, clip_sample_range=1.0, ) snake_case = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def _lowerCamelCase ( self, lowercase_, lowercase_=0 ) -> str: if str(lowercase_ ).startswith('mps' ): snake_case = torch.manual_seed(lowercase_ ) else: snake_case = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def _lowerCamelCase ( self ) -> str: snake_case = 'cpu' snake_case = self.get_dummy_components() snake_case = self.pipeline_class(**lowercase_ ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = pipe(**self.get_dummy_inputs(lowercase_ ) ) snake_case = output.images[0] snake_case = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) snake_case = np.array( [ 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> int: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = torch_device == 'cpu' snake_case = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowercase_, relax_max_difference=lowercase_, ) def _lowerCamelCase ( self ) -> Tuple: snake_case = self.get_dummy_components() snake_case = self.pipeline_class(**lowercase_ ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 1 snake_case = 2 snake_case = self.get_dummy_inputs(lowercase_ ) for key in inputs.keys(): if key in self.batch_params: snake_case = batch_size * [inputs[key]] snake_case = pipe(**lowercase_, num_images_per_prompt=lowercase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> List[Any]: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) snake_case = ShapEPipeline.from_pretrained('openai/shap-e' ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( 'a shark', generator=lowercase_, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowercase_, lowercase_ )
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __magic_name__ ( A ) -> Optional[Any]: snake_case = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) snake_case = MaskFormerConfig(backbone_config=A ) snake_case = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok snake_case = 8_4_7 snake_case = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok snake_case = 1_5_0 snake_case = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok snake_case = 1_7_1 snake_case = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO snake_case = 1_3_3 snake_case = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok snake_case = 1_9 snake_case = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok snake_case = 6_5 snake_case = 'mapillary-vistas-id2label.json' snake_case = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} return config def __magic_name__ ( A ) -> Dict: snake_case = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = dct.pop(A ) snake_case = val def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): snake_case = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) snake_case = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[:dim, :] snake_case = in_proj_bias[: dim] snake_case = in_proj_weight[ dim : dim * 2, : ] snake_case = in_proj_bias[ dim : dim * 2 ] snake_case = in_proj_weight[ -dim :, : ] snake_case = in_proj_bias[-dim :] # fmt: on def __magic_name__ ( A , A ) -> int: # fmt: off snake_case = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[: hidden_size, :] snake_case = in_proj_bias[:config.hidden_size] snake_case = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case = in_proj_bias[hidden_size : hidden_size * 2] snake_case = in_proj_weight[-hidden_size :, :] snake_case = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) snake_case = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[: hidden_size, :] snake_case = in_proj_bias[:config.hidden_size] snake_case = in_proj_weight[hidden_size : hidden_size * 2, :] snake_case = in_proj_bias[hidden_size : hidden_size * 2] snake_case = in_proj_weight[-hidden_size :, :] snake_case = in_proj_bias[-hidden_size :] # fmt: on def __magic_name__ ( ) -> torch.Tensor: snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def __magic_name__ ( A , A , A , A = False ) -> Tuple: snake_case = get_maskformer_config(A ) # load original state_dict with open(A , 'rb' ) as f: snake_case = pickle.load(A ) snake_case = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys snake_case = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_swin_q_k_v(A , config.backbone_config ) read_in_decoder_q_k_v(A , A ) # update to torch tensors for key, value in state_dict.items(): snake_case = torch.from_numpy(A ) # load 🤗 model snake_case = MaskFormerForInstanceSegmentation(A ) model.eval() for name, param in model.named_parameters(): print(A , param.shape ) snake_case , snake_case = model.load_state_dict(A , strict=A ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(A ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results snake_case = prepare_img() if "vistas" in model_name: snake_case = 6_5 elif "cityscapes" in model_name: snake_case = 6_5_5_3_5 else: snake_case = 2_5_5 snake_case = True if 'ade' in model_name else False snake_case = MaskFormerImageProcessor(ignore_index=A , reduce_labels=A ) snake_case = image_processor(A , return_tensors='pt' ) snake_case = model(**A ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": snake_case = torch.tensor( [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , A , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowerCAmelCase_ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self, lowercase_ ) -> Any: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'], model_result['ss'] ): snake_case = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sgugger/tiny-distilbert-classification' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, only_pretrain_model=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> List[str]: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Any: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> Dict: snake_case = 'sshleifer/tiny-gpt2' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'patrickvonplaten/t5-tiny-random' snake_case = AutoConfig.from_pretrained(lowercase_ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_, configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0, 'Cannot do xla on CPU.' ) def _lowerCamelCase ( self ) -> str: snake_case = 'sshleifer/tiny-gpt2' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=lowercase_, inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], use_xla=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ) -> Dict: snake_case = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=lowercase_, save_to_csv=lowercase_, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(lowercase_, 'inf_time.csv' ), inference_memory_csv_file=os.path.join(lowercase_, 'inf_mem.csv' ), env_info_csv_file=os.path.join(lowercase_, 'env.csv' ), multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_, 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_, 'env.csv' ) ).exists() ) def _lowerCamelCase ( self ) -> Tuple: snake_case = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ ): self.assertTrue(hasattr(lowercase_, 'sequential' ) ) self.assertTrue(hasattr(lowercase_, 'cumulative' ) ) self.assertTrue(hasattr(lowercase_, 'current' ) ) self.assertTrue(hasattr(lowercase_, 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=lowercase_, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(lowercase_, 'log.txt' ), log_print=lowercase_, trace_memory_line_by_line=lowercase_, eager_mode=lowercase_, multi_process=lowercase_, ) snake_case = TensorFlowBenchmark(lowercase_ ) snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(lowercase_, 'log.txt' ) ).exists() )
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
1
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class lowerCamelCase : snake_case_ = 42 snake_case_ = None snake_case_ = None lowerCAmelCase_ = namedtuple("CoinsDistribResult", "moves excess") def __magic_name__ ( A ) -> int: if root is None: return 0 # Validation def count_nodes(A ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(A ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(A ) != count_coins(A ): raise ValueError('The nodes number should be same as the number of coins' ) # Main calculation def get_distrib(A ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) snake_case , snake_case = get_distrib(node.left ) snake_case , snake_case = get_distrib(node.right ) snake_case = 1 - left_distrib_excess snake_case = 1 - right_distrib_excess snake_case = ( left_distrib_moves + right_distrib_moves + abs(A ) + abs(A ) ) snake_case = node.data - coins_to_left - coins_to_right return CoinsDistribResult(A , A ) return get_distrib(A )[0] if __name__ == "__main__": import doctest doctest.testmod()
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase ( unittest.TestCase ): @slow def _lowerCamelCase ( self ) -> Dict: snake_case = XLMRobertaModel.from_pretrained('xlm-roberta-base' ) snake_case = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house snake_case = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim snake_case = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case = model(lowercase_ )['last_hidden_state'].detach() self.assertEqual(output.shape, lowercase_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], lowercase_, atol=1E-3 ) ) @slow def _lowerCamelCase ( self ) -> int: snake_case = XLMRobertaModel.from_pretrained('xlm-roberta-large' ) snake_case = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house snake_case = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim snake_case = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case = model(lowercase_ )['last_hidden_state'].detach() self.assertEqual(output.shape, lowercase_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1], lowercase_, atol=1E-3 ) )
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowerCAmelCase_ = logging.get_logger(__name__) @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self, **lowercase_ ) -> Optional[Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: snake_case = deprecated_arg[3:] snake_case = not kwargs.pop(lowercase_ ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) snake_case = kwargs.pop('tpu_name', self.tpu_name ) snake_case = kwargs.pop('device_idx', self.device_idx ) snake_case = kwargs.pop('eager_mode', self.eager_mode ) snake_case = kwargs.pop('use_xla', self.use_xla ) super().__init__(**lowercase_ ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Name of TPU'''} , ) snake_case_ = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) snake_case_ = field(default=__lowerCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _lowerCamelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self, ['tf'] ) snake_case = None if self.tpu: try: if self.tpu_name: snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: snake_case = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: snake_case = None return tpu @cached_property def _lowerCamelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self, ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) snake_case = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU' ) snake_case = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([], 'GPU' ) # disable GPU snake_case = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def _lowerCamelCase ( self ) -> bool: requires_backends(self, ['tf'] ) return self._setup_tpu is not None @property def _lowerCamelCase ( self ) -> "tf.distribute.Strategy": requires_backends(self, ['tf'] ) return self._setup_strategy @property def _lowerCamelCase ( self ) -> List[str]: requires_backends(self, ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _lowerCamelCase ( self ) -> int: requires_backends(self, ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _lowerCamelCase ( self ) -> bool: return self.n_gpu > 0
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated lowerCAmelCase_ = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ lowerCAmelCase_ = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( A ) -> List[Any]: snake_case = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=A )[0] @deprecated(A , 'Please use tf.data to implement this functionality.' ) def __magic_name__ ( A ) -> Optional[int]: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=A ) as bytestream: snake_case = _readaa(A ) if magic != 2_0_5_1: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) snake_case = _readaa(A ) snake_case = _readaa(A ) snake_case = _readaa(A ) snake_case = bytestream.read(rows * cols * num_images ) snake_case = numpy.frombuffer(A , dtype=numpy.uinta ) snake_case = data.reshape(A , A , A , 1 ) return data @deprecated(A , 'Please use tf.one_hot on tensors.' ) def __magic_name__ ( A , A ) -> Union[str, Any]: snake_case = labels_dense.shape[0] snake_case = numpy.arange(A ) * num_classes snake_case = numpy.zeros((num_labels, num_classes) ) snake_case = 1 return labels_one_hot @deprecated(A , 'Please use tf.data to implement this functionality.' ) def __magic_name__ ( A , A=False , A=1_0 ) -> Dict: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=A ) as bytestream: snake_case = _readaa(A ) if magic != 2_0_4_9: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) snake_case = _readaa(A ) snake_case = bytestream.read(A ) snake_case = numpy.frombuffer(A , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(A , A ) return labels class lowerCamelCase : @deprecated( lowercase_, 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.', ) def __init__( self, lowercase_, lowercase_, lowercase_=False, lowercase_=False, lowercase_=dtypes.floataa, lowercase_=True, lowercase_=None, ) -> Dict: snake_case , snake_case = random_seed.get_seed(lowercase_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) snake_case = dtypes.as_dtype(lowercase_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: snake_case = 10000 snake_case = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'''images.shape: {images.shape} labels.shape: {labels.shape}''' snake_case = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 snake_case = images.reshape( images.shape[0], images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. snake_case = images.astype(numpy.floataa ) snake_case = numpy.multiply(lowercase_, 1.0 / 255.0 ) snake_case = images snake_case = labels snake_case = 0 snake_case = 0 @property def _lowerCamelCase ( self ) -> Dict: return self._images @property def _lowerCamelCase ( self ) -> int: return self._labels @property def _lowerCamelCase ( self ) -> Union[str, Any]: return self._num_examples @property def _lowerCamelCase ( self ) -> List[str]: return self._epochs_completed def _lowerCamelCase ( self, lowercase_, lowercase_=False, lowercase_=True ) -> List[str]: if fake_data: snake_case = [1] * 784 snake_case = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(lowercase_ )], [fake_label for _ in range(lowercase_ )], ) snake_case = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: snake_case = numpy.arange(self._num_examples ) numpy.random.shuffle(lowercase_ ) snake_case = self.images[perma] snake_case = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch snake_case = self._num_examples - start snake_case = self._images[start : self._num_examples] snake_case = self._labels[start : self._num_examples] # Shuffle the data if shuffle: snake_case = numpy.arange(self._num_examples ) numpy.random.shuffle(lowercase_ ) snake_case = self.images[perm] snake_case = self.labels[perm] # Start next epoch snake_case = 0 snake_case = batch_size - rest_num_examples snake_case = self._index_in_epoch snake_case = self._images[start:end] snake_case = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part), axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part), axis=0 ), ) else: self._index_in_epoch += batch_size snake_case = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(A , 'Please write your own downloading logic.' ) def __magic_name__ ( A , A , A ) -> str: if not gfile.Exists(A ): gfile.MakeDirs(A ) snake_case = os.path.join(A , A ) if not gfile.Exists(A ): urllib.request.urlretrieve(A , A ) # noqa: S310 with gfile.GFile(A ) as f: snake_case = f.size() print('Successfully downloaded' , A , A , 'bytes.' ) return filepath @deprecated( A , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def __magic_name__ ( A , A=False , A=False , A=dtypes.floataa , A=True , A=5_0_0_0 , A=None , A=DEFAULT_SOURCE_URL , ) -> Tuple: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=A , one_hot=A , dtype=A , seed=A ) snake_case = fake() snake_case = fake() snake_case = fake() return _Datasets(train=A , validation=A , test=A ) if not source_url: # empty string check snake_case = DEFAULT_SOURCE_URL snake_case = 'train-images-idx3-ubyte.gz' snake_case = 'train-labels-idx1-ubyte.gz' snake_case = 't10k-images-idx3-ubyte.gz' snake_case = 't10k-labels-idx1-ubyte.gz' snake_case = _maybe_download( A , A , source_url + train_images_file ) with gfile.Open(A , 'rb' ) as f: snake_case = _extract_images(A ) snake_case = _maybe_download( A , A , source_url + train_labels_file ) with gfile.Open(A , 'rb' ) as f: snake_case = _extract_labels(A , one_hot=A ) snake_case = _maybe_download( A , A , source_url + test_images_file ) with gfile.Open(A , 'rb' ) as f: snake_case = _extract_images(A ) snake_case = _maybe_download( A , A , source_url + test_labels_file ) with gfile.Open(A , 'rb' ) as f: snake_case = _extract_labels(A , one_hot=A ) if not 0 <= validation_size <= len(A ): snake_case = ( 'Validation size should be between 0 and ' F'''{len(A )}. Received: {validation_size}.''' ) raise ValueError(A ) snake_case = train_images[:validation_size] snake_case = train_labels[:validation_size] snake_case = train_images[validation_size:] snake_case = train_labels[validation_size:] snake_case = {'dtype': dtype, 'reshape': reshape, 'seed': seed} snake_case = _DataSet(A , A , **A ) snake_case = _DataSet(A , A , **A ) snake_case = _DataSet(A , A , **A ) return _Datasets(train=A , validation=A , test=A )
332
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring''' # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") class lowerCamelCase : def __init__( self, lowercase_, lowercase_, lowercase_ = True, lowercase_ = False ) -> Optional[int]: snake_case = scheduler snake_case = optimizers if isinstance(lowercase_, (list, tuple) ) else [optimizers] snake_case = split_batches snake_case = step_with_optimizer snake_case = GradientState() def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Union[str, Any]: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*lowercase_, **lowercase_ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*lowercase_, **lowercase_ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step snake_case = AcceleratorState().num_processes for _ in range(lowercase_ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler, 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*lowercase_, **lowercase_ ) else: self.scheduler.step(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self ) -> List[Any]: return self.scheduler.get_last_lr() def _lowerCamelCase ( self ) -> Union[str, Any]: return self.scheduler.state_dict() def _lowerCamelCase ( self, lowercase_ ) -> List[str]: self.scheduler.load_state_dict(lowercase_ ) def _lowerCamelCase ( self ) -> List[str]: return self.scheduler.get_lr() def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> List[str]: return self.scheduler.print_lr(*lowercase_, **lowercase_ )
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
'''simple docstring''' def __magic_name__ ( A ) -> list: snake_case = len(A ) for i in range(1 , A ): snake_case = collection[i] snake_case = 0 snake_case = i - 1 while low <= high: snake_case = (low + high) // 2 if val < collection[mid]: snake_case = mid - 1 else: snake_case = mid + 1 for j in range(A , A , -1 ): snake_case = collection[j - 1] snake_case = val return collection if __name__ == "__main__": lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip() lowerCAmelCase_ = [int(item) for item in user_input.split(",")] print(binary_insertion_sort(unsorted))
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class lowerCamelCase : def __init__( self, lowercase_, lowercase_=14, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=4, lowercase_=4, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=0.02, ) -> Optional[int]: snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_mask snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = rotary_dim snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = initializer_range snake_case = None snake_case = vocab_size - 1 snake_case = vocab_size - 1 snake_case = vocab_size - 1 def _lowerCamelCase ( self ) -> List[Any]: snake_case = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) snake_case = None if self.use_input_mask: snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowercase_, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) return (config, input_ids, input_mask) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Dict: snake_case = 20 snake_case = model_class_name(lowercase_ ) snake_case = model.init_cache(input_ids.shape[0], lowercase_ ) snake_case = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='i4' ) snake_case = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case = model( input_ids[:, :-1], attention_mask=lowercase_, past_key_values=lowercase_, position_ids=lowercase_, ) snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' ) snake_case = model( input_ids[:, -1:], attention_mask=lowercase_, past_key_values=outputs_cache.past_key_values, position_ids=lowercase_, ) snake_case = model(lowercase_ ) snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Tuple: snake_case = 20 snake_case = model_class_name(lowercase_ ) snake_case = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, ) snake_case = model.init_cache(input_ids.shape[0], lowercase_ ) snake_case = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case = model( input_ids[:, :-1], attention_mask=lowercase_, past_key_values=lowercase_, position_ids=lowercase_, ) snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4' ) snake_case = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowercase_, position_ids=lowercase_, ) snake_case = model(lowercase_, attention_mask=lowercase_ ) snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''' ) @require_flax class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): snake_case_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _lowerCamelCase ( self ) -> Any: snake_case = FlaxGPTJModelTester(self ) def _lowerCamelCase ( self ) -> Optional[Any]: for model_class_name in self.all_model_classes: snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowercase_, lowercase_, lowercase_, lowercase_ ) def _lowerCamelCase ( self ) -> List[str]: for model_class_name in self.all_model_classes: snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowercase_, lowercase_, lowercase_, lowercase_ ) @tooslow def _lowerCamelCase ( self ) -> str: snake_case = GPTaTokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left' ) snake_case = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=lowercase_, truncation=lowercase_ ) snake_case = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) snake_case = False snake_case = model.config.eos_token_id snake_case = jax.jit(model.generate ) snake_case = jit_generate( inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id ).sequences snake_case = tokenizer.batch_decode(lowercase_, skip_special_tokens=lowercase_ ) snake_case = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowercase_, lowercase_ ) @is_pt_flax_cross_test def _lowerCamelCase ( self ) -> Optional[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case = self._prepare_for_class(lowercase_, lowercase_ ) snake_case = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case = getattr(lowercase_, lowercase_ ) snake_case , snake_case = pt_inputs['input_ids'].shape snake_case = np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): snake_case = 0 snake_case = 1 snake_case = 0 snake_case = 1 snake_case = pt_model_class(lowercase_ ).eval() snake_case = model_class(lowercase_, dtype=jnp.floataa ) snake_case = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowercase_ ) snake_case = fx_state with torch.no_grad(): snake_case = pt_model(**lowercase_ ).to_tuple() snake_case = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase_, lowercase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) snake_case = model_class.from_pretrained(lowercase_, from_pt=lowercase_ ) snake_case = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowercase_, lowercase_ ): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @is_pt_flax_cross_test def _lowerCamelCase ( self ) -> Dict: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case = self._prepare_for_class(lowercase_, lowercase_ ) snake_case = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case = getattr(lowercase_, lowercase_ ) snake_case = pt_model_class(lowercase_ ).eval() snake_case = model_class(lowercase_, dtype=jnp.floataa ) snake_case = load_flax_weights_in_pytorch_model(lowercase_, fx_model.params ) snake_case , snake_case = pt_inputs['input_ids'].shape snake_case = np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase_ ): snake_case = 0 snake_case = 1 snake_case = 0 snake_case = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case = pt_model(**lowercase_ ).to_tuple() snake_case = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase_, lowercase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) snake_case = pt_model_class.from_pretrained(lowercase_, from_flax=lowercase_ ) with torch.no_grad(): snake_case = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual( len(lowercase_ ), len(lowercase_ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase_, lowercase_ ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2 ) @tooslow def _lowerCamelCase ( self ) -> List[str]: for model_class_name in self.all_model_classes: snake_case = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) snake_case = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase_ )
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''swinv2''' snake_case_ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self, lowercase_=224, lowercase_=4, lowercase_=3, lowercase_=96, lowercase_=[2, 2, 6, 2], lowercase_=[3, 6, 12, 24], lowercase_=7, lowercase_=4.0, lowercase_=True, lowercase_=0.0, lowercase_=0.0, lowercase_=0.1, lowercase_="gelu", lowercase_=False, lowercase_=0.02, lowercase_=1E-5, lowercase_=32, **lowercase_, ) -> Dict: super().__init__(**lowercase_ ) snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = embed_dim snake_case = depths snake_case = len(lowercase_ ) snake_case = num_heads snake_case = window_size snake_case = mlp_ratio snake_case = qkv_bias snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = drop_path_rate snake_case = hidden_act snake_case = use_absolute_embeddings snake_case = layer_norm_eps snake_case = initializer_range snake_case = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) snake_case = (0, 0, 0, 0)
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ = { "facebook/mask2former-swin-small-coco-instance": ( "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''mask2former''' snake_case_ = ['''swin'''] snake_case_ = {'''hidden_size''': '''hidden_dim'''} def __init__( self, lowercase_ = None, lowercase_ = 256, lowercase_ = 256, lowercase_ = 256, lowercase_ = 1024, lowercase_ = "relu", lowercase_ = 6, lowercase_ = 10, lowercase_ = 8, lowercase_ = 0.0, lowercase_ = 2048, lowercase_ = False, lowercase_ = False, lowercase_ = 4, lowercase_ = 255, lowercase_ = 100, lowercase_ = 0.1, lowercase_ = 2.0, lowercase_ = 5.0, lowercase_ = 5.0, lowercase_ = 12544, lowercase_ = 3.0, lowercase_ = 0.75, lowercase_ = 0.02, lowercase_ = 1.0, lowercase_ = True, lowercase_ = [4, 8, 16, 32], lowercase_ = None, **lowercase_, ) -> int: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) snake_case = CONFIG_MAPPING['swin']( image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=lowercase_, out_features=['stage1', 'stage2', 'stage3', 'stage4'], ) if isinstance(lowercase_, lowercase_ ): snake_case = backbone_config.pop('model_type' ) snake_case = CONFIG_MAPPING[backbone_model_type] snake_case = config_class.from_dict(lowercase_ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. ''' F'''Supported model types: {",".join(self.backbones_supported )}''' ) snake_case = backbone_config snake_case = feature_size snake_case = mask_feature_size snake_case = hidden_dim snake_case = encoder_feedforward_dim snake_case = activation_function snake_case = encoder_layers snake_case = decoder_layers snake_case = num_attention_heads snake_case = dropout snake_case = dim_feedforward snake_case = pre_norm snake_case = enforce_input_projection snake_case = common_stride snake_case = ignore_value snake_case = num_queries snake_case = no_object_weight snake_case = class_weight snake_case = mask_weight snake_case = dice_weight snake_case = train_num_points snake_case = oversample_ratio snake_case = importance_sample_ratio snake_case = init_std snake_case = init_xavier_std snake_case = use_auxiliary_loss snake_case = feature_strides snake_case = output_auxiliary_logits snake_case = decoder_layers super().__init__(**lowercase_ ) @classmethod def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> str: return cls( backbone_config=lowercase_, **lowercase_, ) def _lowerCamelCase ( self ) -> Dict[str, any]: snake_case = copy.deepcopy(self.__dict__ ) snake_case = self.backbone_config.to_dict() snake_case = self.__class__.model_type return output
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
'''simple docstring''' def __magic_name__ ( A , A ) -> float: if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(A ) * abs(A ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def __magic_name__ ( A , A = "cpu" , A = None ) -> None: snake_case = torch.load(A , map_location=A ) for k, v in tqdm(state_dict.items() ): if not isinstance(A , torch.Tensor ): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' ) snake_case = v.half() if save_path is None: # overwrite src_path snake_case = src_path torch.save(A , A ) if __name__ == "__main__": fire.Fire(convert)
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
'''simple docstring''' from __future__ import annotations from typing import Any class lowerCamelCase : def __init__( self, lowercase_, lowercase_, lowercase_ = 0 ) -> None: snake_case , snake_case = row, column snake_case = [[default_value for c in range(lowercase_ )] for r in range(lowercase_ )] def __str__( self ) -> str: snake_case = F'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier snake_case = 0 for row_vector in self.array: for obj in row_vector: snake_case = max(lowercase_, len(str(lowercase_ ) ) ) snake_case = F'''%{max_element_length}s''' # Make string and return def single_line(lowercase_ ) -> str: nonlocal string_format_identifier snake_case = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowercase_ ) for row_vector in self.array ) return s def __repr__( self ) -> str: return str(self ) def _lowerCamelCase ( self, lowercase_ ) -> bool: if not (isinstance(lowercase_, (list, tuple) ) and len(lowercase_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self, lowercase_ ) -> Any: assert self.validate_indicies(lowercase_ ) return self.array[loc[0]][loc[1]] def __setitem__( self, lowercase_, lowercase_ ) -> None: assert self.validate_indicies(lowercase_ ) snake_case = value def __add__( self, lowercase_ ) -> Matrix: assert isinstance(lowercase_, lowercase_ ) assert self.row == another.row and self.column == another.column # Add snake_case = Matrix(self.row, self.column ) for r in range(self.row ): for c in range(self.column ): snake_case = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: snake_case = Matrix(self.row, self.column ) for r in range(self.row ): for c in range(self.column ): snake_case = -self[r, c] return result def __sub__( self, lowercase_ ) -> Matrix: return self + (-another) def __mul__( self, lowercase_ ) -> Matrix: if isinstance(lowercase_, (int, float) ): # Scalar multiplication snake_case = Matrix(self.row, self.column ) for r in range(self.row ): for c in range(self.column ): snake_case = self[r, c] * another return result elif isinstance(lowercase_, lowercase_ ): # Matrix multiplication assert self.column == another.row snake_case = Matrix(self.row, another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: snake_case = F'''Unsupported type given for another ({type(lowercase_ )})''' raise TypeError(lowercase_ ) def _lowerCamelCase ( self ) -> Matrix: snake_case = Matrix(self.column, self.row ) for r in range(self.row ): for c in range(self.column ): snake_case = self[r, c] return result def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> Any: assert isinstance(lowercase_, lowercase_ ) and isinstance(lowercase_, lowercase_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate snake_case = v.transpose() snake_case = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def __magic_name__ ( ) -> None: # a^(-1) snake_case = Matrix(3 , 3 , 0 ) for i in range(3 ): snake_case = 1 print(F'''a^(-1) is {ainv}''' ) # u, v snake_case = Matrix(3 , 1 , 0 ) snake_case , snake_case , snake_case = 1, 2, -3 snake_case = Matrix(3 , 1 , 0 ) snake_case , snake_case , snake_case = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(A , A )}''' ) def __magic_name__ ( ) -> None: import doctest doctest.testmod() testa()
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
1
'''simple docstring''' class lowerCamelCase : def __init__( self, lowercase_ ) -> None: snake_case = set_counts snake_case = max(lowercase_ ) snake_case = len(lowercase_ ) snake_case = [1] * num_sets snake_case = list(range(lowercase_ ) ) def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> bool: snake_case = self.get_parent(lowercase_ ) snake_case = self.get_parent(lowercase_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] snake_case = 0 snake_case = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 snake_case = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] snake_case = 0 snake_case = src_parent snake_case = self.set_counts[src_parent] snake_case = max(self.max_set, lowercase_ ) return True def _lowerCamelCase ( self, lowercase_ ) -> int: if self.parents[disj_set] == disj_set: return disj_set snake_case = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration lowerCAmelCase_ = "facebook/wmt19-en-de" lowerCAmelCase_ = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model lowerCAmelCase_ = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) lowerCAmelCase_ = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test lowerCAmelCase_ = tokenizer(["Making tiny model"], return_tensors="pt") lowerCAmelCase_ = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save lowerCAmelCase_ = "tiny-wmt19-en-de" tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-de
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __magic_name__ ( A ) -> Union[str, Any]: snake_case , snake_case = image.size snake_case , snake_case = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32 snake_case = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) snake_case = np.array(A ).astype(np.floataa ) / 255.0 snake_case = image[None].transpose(0 , 3 , 1 , 2 ) snake_case = torch.from_numpy(A ) return 2.0 * image - 1.0 class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, lowercase_, lowercase_, lowercase_, ) -> Dict: super().__init__() self.register_modules(vqvae=lowercase_, unet=lowercase_, scheduler=lowercase_ ) @torch.no_grad() def __call__( self, lowercase_ = None, lowercase_ = 1, lowercase_ = 100, lowercase_ = 0.0, lowercase_ = None, lowercase_ = "pil", lowercase_ = True, ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(lowercase_, PIL.Image.Image ): snake_case = 1 elif isinstance(lowercase_, torch.Tensor ): snake_case = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase_ )}''' ) if isinstance(lowercase_, PIL.Image.Image ): snake_case = preprocess(lowercase_ ) snake_case , snake_case = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image snake_case = (batch_size, self.unet.config.in_channels // 2, height, width) snake_case = next(self.unet.parameters() ).dtype snake_case = randn_tensor(lowercase_, generator=lowercase_, device=self.device, dtype=lowercase_ ) snake_case = image.to(device=self.device, dtype=lowercase_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowercase_, device=self.device ) snake_case = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler snake_case = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] snake_case = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) snake_case = {} if accepts_eta: snake_case = eta for t in self.progress_bar(lowercase_ ): # concat latents and low resolution image in the channel dimension. snake_case = torch.cat([latents, image], dim=1 ) snake_case = self.scheduler.scale_model_input(lowercase_, lowercase_ ) # predict the noise residual snake_case = self.unet(lowercase_, lowercase_ ).sample # compute the previous noisy sample x_t -> x_t-1 snake_case = self.scheduler.step(lowercase_, lowercase_, lowercase_, **lowercase_ ).prev_sample # decode the image latents with the VQVAE snake_case = self.vqvae.decode(lowercase_ ).sample snake_case = torch.clamp(lowercase_, -1.0, 1.0 ) snake_case = image / 2 + 0.5 snake_case = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": snake_case = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring''' def __magic_name__ ( A ) -> int: snake_case = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def __magic_name__ ( A = 1_0_0 ) -> int: snake_case = 1 snake_case = 2 for i in range(2 , max_n + 1 ): snake_case = pre_numerator snake_case = 2 * i // 3 if i % 3 == 0 else 1 snake_case = cur_numerator snake_case = e_cont * pre_numerator + temp return sum_digits(A ) if __name__ == "__main__": print(f"{solution() = }")
332
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''note_seq'''] def __init__( self, *lowercase_, **lowercase_ ) -> str: requires_backends(self, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: requires_backends(cls, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring''' import os def __magic_name__ ( ) -> Tuple: with open(os.path.dirname(A ) + '/p022_names.txt' ) as file: snake_case = str(file.readlines()[0] ) snake_case = names.replace('"' , '' ).split(',' ) names.sort() snake_case = 0 snake_case = 0 for i, name in enumerate(A ): for letter in name: name_score += ord(A ) - 6_4 total_score += (i + 1) * name_score snake_case = 0 return total_score if __name__ == "__main__": print(solution())
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
'''simple docstring''' def __magic_name__ ( A ) -> list: if any(not isinstance(A , A ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(A ) ): for i, (rod_upper, rod_lower) in enumerate(zip(A , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
1
'''simple docstring''' import math def __magic_name__ ( A , A ) -> List[str]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(A ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowerCAmelCase_ = "Enter the base and the power separated by a comma: " lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(",")) lowerCAmelCase_ , lowerCAmelCase_ = map(int, input(prompt).split(",")) # We find the log of each number, using the function res(), which takes two # arguments. lowerCAmelCase_ = res(xa, ya) lowerCAmelCase_ = res(xa, ya) # We check for the largest number if resa > resa: print("Largest number is", xa, "^", ya) elif resa > resa: print("Largest number is", xa, "^", ya) else: print("Both are equal")
332
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
1
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __magic_name__ ( A ) -> Any: snake_case = filter(lambda A : p.requires_grad , model.parameters() ) snake_case = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ = logging.getLogger(__name__) def __magic_name__ ( A , A ) -> Optional[Any]: if metric == "rouge2": snake_case = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": snake_case = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": snake_case = '{val_avg_em:.4f}-{step_count}' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ' function.' ) snake_case = ModelCheckpoint( dirpath=A , filename=A , monitor=F'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def __magic_name__ ( A , A ) -> int: return EarlyStopping( monitor=F'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=A , verbose=A , ) class lowerCamelCase ( pl.Callback ): def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> List[str]: snake_case = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowercase_ ) @rank_zero_only def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_=True ) -> None: logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) snake_case = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results snake_case = Path(pl_module.hparams.output_dir ) if type_path == "test": snake_case = od / 'test_results.txt' snake_case = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. snake_case = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' snake_case = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowercase_ ) generations_file.parent.mkdir(exist_ok=lowercase_ ) with open(lowercase_, 'a+' ) as writer: for key in sorted(lowercase_ ): if key in ["log", "progress_bar", "preds"]: continue snake_case = metrics[key] if isinstance(lowercase_, torch.Tensor ): snake_case = val.item() snake_case = F'''{key}: {val:.6f}\n''' writer.write(lowercase_ ) if not save_generations: return if "preds" in metrics: snake_case = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(lowercase_ ) @rank_zero_only def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> str: try: snake_case = pl_module.model.model.num_parameters() except AttributeError: snake_case = pl_module.model.num_parameters() snake_case = count_trainable_parameters(lowercase_ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int: save_json(pl_module.metrics, pl_module.metrics_save_path ) return self._write_logs(lowercase_, lowercase_, 'test' ) @rank_zero_only def _lowerCamelCase ( self, lowercase_, lowercase_ ) -> int: save_json(pl_module.metrics, pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
332
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
1
'''simple docstring''' lowerCAmelCase_ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}] lowerCAmelCase_ = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
1
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = ['''image_processor''', '''tokenizer'''] snake_case_ = '''FlavaImageProcessor''' snake_case_ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self, lowercase_=None, lowercase_=None, **lowercase_ ) -> str: snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowercase_, ) snake_case = kwargs.pop('feature_extractor' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowercase_, lowercase_ ) snake_case = self.image_processor def __call__( self, lowercase_ = None, lowercase_ = None, lowercase_ = True, lowercase_ = False, lowercase_ = False, lowercase_ = None, lowercase_ = 0, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = False, lowercase_ = False, lowercase_ = False, lowercase_ = False, lowercase_ = True, lowercase_ = None, **lowercase_, ) -> List[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: snake_case = self.tokenizer( text=lowercase_, add_special_tokens=lowercase_, padding=lowercase_, truncation=lowercase_, max_length=lowercase_, stride=lowercase_, pad_to_multiple_of=lowercase_, return_token_type_ids=lowercase_, return_attention_mask=lowercase_, return_overflowing_tokens=lowercase_, return_special_tokens_mask=lowercase_, return_offsets_mapping=lowercase_, return_length=lowercase_, verbose=lowercase_, return_tensors=lowercase_, **lowercase_, ) if images is not None: snake_case = self.image_processor( lowercase_, return_image_mask=lowercase_, return_codebook_pixels=lowercase_, return_tensors=lowercase_, **lowercase_, ) if text is not None and images is not None: encoding.update(lowercase_ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase_ ), tensor_type=lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Tuple: return self.tokenizer.batch_decode(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Optional[Any]: return self.tokenizer.decode(*lowercase_, **lowercase_ ) @property def _lowerCamelCase ( self ) -> Any: snake_case = self.tokenizer.model_input_names snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _lowerCamelCase ( self ) -> Optional[int]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowercase_, ) return self.image_processor_class @property def _lowerCamelCase ( self ) -> int: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowercase_, ) return self.image_processor
332
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
1
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCAmelCase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCamelCase ( unittest.TestCase ): def __init__( self, lowercase_, lowercase_=7, lowercase_=3, lowercase_=18, lowercase_=30, lowercase_=400, lowercase_=None, lowercase_=True, lowercase_=True, lowercase_=None, ) -> List[Any]: snake_case = size if size is not None else {'height': 20, 'width': 20} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = size snake_case = do_normalize snake_case = do_convert_rgb snake_case = [512, 1024, 2048, 4096] snake_case = patch_size if patch_size is not None else {'height': 16, 'width': 16} def _lowerCamelCase ( self ) -> int: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _lowerCamelCase ( self ) -> Any: snake_case = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' snake_case = Image.open(requests.get(lowercase_, stream=lowercase_ ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ) -> List[str]: snake_case = PixaStructImageProcessingTester(self ) @property def _lowerCamelCase ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_, 'do_normalize' ) ) self.assertTrue(hasattr(lowercase_, 'do_convert_rgb' ) ) def _lowerCamelCase ( self ) -> Any: snake_case = self.image_processor_tester.prepare_dummy_image() snake_case = self.image_processing_class(**self.image_processor_dict ) snake_case = 2048 snake_case = image_processor(lowercase_, return_tensors='pt', max_patches=lowercase_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_606 ), atol=1E-3, rtol=1E-3 ) ) def _lowerCamelCase ( self ) -> int: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, Image.Image ) # Test not batched input snake_case = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched snake_case = image_processor( lowercase_, return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def _lowerCamelCase ( self ) -> Any: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, Image.Image ) # Test not batched input snake_case = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 snake_case = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowercase_ ): snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_ ).flattened_patches snake_case = 'Hello' snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_, header_text=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched snake_case = image_processor( lowercase_, return_tensors='pt', max_patches=lowercase_, header_text=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def _lowerCamelCase ( self ) -> int: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, np.ndarray ) snake_case = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched snake_case = image_processor( lowercase_, return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def _lowerCamelCase ( self ) -> Any: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_, torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, torch.Tensor ) # Test not batched input snake_case = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched snake_case = image_processor( lowercase_, return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ): snake_case_ = PixaStructImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = PixaStructImageProcessingTester(self, num_channels=4 ) snake_case = 3 @property def _lowerCamelCase ( self ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ) -> List[str]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_, 'do_normalize' ) ) self.assertTrue(hasattr(lowercase_, 'do_convert_rgb' ) ) def _lowerCamelCase ( self ) -> Any: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_, Image.Image ) # Test not batched input snake_case = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case = image_processor( image_inputs[0], return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched snake_case = image_processor( lowercase_, return_tensors='pt', max_patches=lowercase_ ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
332
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
1
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def __magic_name__ ( A ) -> Optional[int]: snake_case = split_dict._to_yaml_list() assert len(A ) == len(A ) snake_case = SplitDict._from_yaml_list(A ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump snake_case = None # the split name of split_dict takes over the name of the split info object snake_case = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=A ), SplitInfo(dataset_name='my_dataset' )] ) def __magic_name__ ( A ) -> int: # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files snake_case = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
332
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''torch''', '''scipy'''] def __init__( self, *lowercase_, **lowercase_ ) -> Any: requires_backends(self, ['torch', 'scipy'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]: requires_backends(cls, ['torch', 'scipy'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]: requires_backends(cls, ['torch', 'scipy'] )
332
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase_ = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
1
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ = Lock() def __magic_name__ ( A , A , A , A , A , A , A ) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def __magic_name__ ( A ) -> str: snake_case = [] snake_case = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) snake_case = temp_rs snake_case = temp_rr for i in range(1 , len(A ) - 1 ): snake_case = Pipe() snake_case = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) snake_case = temp_rs snake_case = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): snake_case = result_pipe[p][0].recv() process_array_[p].join() return arr def __magic_name__ ( ) -> Tuple: snake_case = list(range(1_0 , 0 , -1 ) ) print('Initial List' ) print(*A ) snake_case = odd_even_transposition(A ) print('Sorted List\n' ) print(*A ) if __name__ == "__main__": main()
332
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''roberta''' def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple: super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class lowerCamelCase ( __lowerCAmelCase ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
332
1
'''simple docstring''' import warnings warnings.warn( "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: " "`from accelerate import find_executable_batch_size` to avoid this warning.", FutureWarning, )
332
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } lowerCAmelCase_ = { "allenai/led-base-16384": 1_6_3_8_4, } class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = LEDTokenizer snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int: super().__init__( lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, ) snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) ) snake_case = add_prefix_space snake_case = pre_tok_class(**lowercase_ ) snake_case = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case = 'post_processor' snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ ) if tokenizer_component_instance: snake_case = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case = tuple(state['sep'] ) if "cls" in state: snake_case = tuple(state['cls'] ) snake_case = False if state.get('add_prefix_space', lowercase_ ) != add_prefix_space: snake_case = add_prefix_space snake_case = True if state.get('trim_offsets', lowercase_ ) != trim_offsets: snake_case = trim_offsets snake_case = True if changes_to_apply: snake_case = getattr(lowercase_, state.pop('type' ) ) snake_case = component_class(**lowercase_ ) setattr(self.backend_tokenizer, lowercase_, lowercase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self, lowercase_ ) -> Any: snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value snake_case = value def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding: snake_case = kwargs.get('is_split_into_words', lowercase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*lowercase_, **lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]: snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ ) return tuple(lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict: snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict: snake_case = super()._pad( encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, ) # Load from model defaults if return_attention_mask is None: snake_case = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ ) if needs_to_be_padded: snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": snake_case = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
332
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["SpeechEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxSpeechEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __magic_name__ ( A ) -> Tuple: snake_case = [] embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', F'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', F'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', F'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', F'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def __magic_name__ ( A , A ) -> Optional[int]: snake_case = [] attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def __magic_name__ ( A ) -> List[Any]: snake_case = [] token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def __magic_name__ ( ) -> Dict: snake_case = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def __magic_name__ ( A , A , A , A ) -> int: snake_case = 'imagenet-1k-id2label.json' snake_case = 1_0_0_0 snake_case = 'huggingface/label-files' snake_case = num_labels snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) ) snake_case = {int(A ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": snake_case = [1, 2, 1_0] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": snake_case = [1, 4, 1_6] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: snake_case = [2, 2, 2_0] snake_case = [3, 1_2, 1_6] snake_case = [1_9_2, 7_6_8, 1_0_2_4] snake_case = CvtForImageClassification(A ) snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) snake_case = image_size snake_case = torch.load(A , map_location=torch.device('cpu' ) ) snake_case = OrderedDict() snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: snake_case = list_of_state_dict + cls_token(A ) snake_case = list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): snake_case = list_of_state_dict + attention(A , A ) snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--cvt_model", default="cvt-w24", type=str, help="Name of the cvt model you'd like to convert.", ) parser.add_argument( "--image_size", default=3_8_4, type=int, help="Input Image Size", ) parser.add_argument( "--cvt_file_name", default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth", type=str, help="Input Image Size", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowerCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
332
1
'''simple docstring''' from torch import nn def __magic_name__ ( A ) -> int: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'''Unsupported activation function: {act_fn}''' )
332
'''simple docstring''' from pathlib import Path import fire def __magic_name__ ( A , A , A ) -> Union[str, Any]: snake_case = Path(A ) snake_case = Path(A ) dest_dir.mkdir(exist_ok=A ) for path in src_dir.iterdir(): snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n] snake_case = dest_dir.joinpath(path.name ) print(A ) dest_path.open('w' ).write('\n'.join(A ) ) if __name__ == "__main__": fire.Fire(minify)
332
1
'''simple docstring''' import math def __magic_name__ ( A , A ) -> int: snake_case = len(A ) snake_case = int(math.floor(math.sqrt(A ) ) ) snake_case = 0 while arr[min(A , A ) - 1] < x: snake_case = step step += int(math.floor(math.sqrt(A ) ) ) if prev >= n: return -1 while arr[prev] < x: snake_case = prev + 1 if prev == min(A , A ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowerCAmelCase_ = input("Enter numbers separated by a comma:\n").strip() lowerCAmelCase_ = [int(item) for item in user_input.split(",")] lowerCAmelCase_ = int(input("Enter the number to be searched:\n")) lowerCAmelCase_ = jump_search(arr, x) if res == -1: print("Number not found!") else: print(f"Number {x} is at index {res}")
332
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase_ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __magic_name__ ( A , A ) -> Union[str, Any]: inspect_dataset(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __magic_name__ ( A , A ) -> int: inspect_metric(A , A ) snake_case = path + '.py' assert script_name in os.listdir(A ) assert "__pycache__" not in os.listdir(A ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_config_info(A , config_name=A ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> Any: with pytest.raises(A ): get_dataset_config_info(A , config_name=A ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __magic_name__ ( A , A ) -> Dict: snake_case = get_dataset_config_names(A ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> List[str]: snake_case = get_dataset_infos(A ) assert list(infos.keys() ) == expected_configs snake_case = expected_configs[0] assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __magic_name__ ( A , A , A ) -> Any: snake_case = get_dataset_infos(A ) assert expected_config in infos snake_case = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __magic_name__ ( A , A , A ) -> int: with pytest.raises(A ): get_dataset_split_names(A , config_name=A )
332
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar("T") class lowerCamelCase ( Generic[T] ): def __init__( self, lowercase_ ) -> Optional[int]: snake_case = data snake_case = None def __str__( self ) -> str: return F'''{self.data}''' class lowerCamelCase ( Generic[T] ): def __init__( self ) -> None: snake_case = None def __iter__( self ) -> Iterator[T]: snake_case = self.top while node: yield node.data snake_case = node.next def __str__( self ) -> str: return "->".join([str(lowercase_ ) for item in self] ) def __len__( self ) -> int: return len(tuple(iter(self ) ) ) def _lowerCamelCase ( self ) -> bool: return self.top is None def _lowerCamelCase ( self, lowercase_ ) -> None: snake_case = Node(lowercase_ ) if not self.is_empty(): snake_case = self.top snake_case = node def _lowerCamelCase ( self ) -> T: if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top, lowercase_ ) snake_case = self.top snake_case = self.top.next return pop_node.data def _lowerCamelCase ( self ) -> T: if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def _lowerCamelCase ( self ) -> None: snake_case = None if __name__ == "__main__": from doctest import testmod testmod()
332
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
332
1
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> None: create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] ) def __magic_name__ ( A , A , A , A , ) -> None: if index == len(A ): print(A ) return for i in range(len(A ) ): if not index_used[i]: current_sequence.append(sequence[i] ) snake_case = True create_state_space_tree(A , A , index + 1 , A ) current_sequence.pop() snake_case = False lowerCAmelCase_ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase_ = ["A", "B", "C"] generate_all_permutations(sequence_a)
332
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase_ = False class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ) -> List[Any]: return 12 @property def _lowerCamelCase ( self ) -> Dict: return 12 @property def _lowerCamelCase ( self ) -> List[Any]: return 32 @property def _lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, ) return model @property def _lowerCamelCase ( self ) -> List[Any]: snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def _lowerCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(lowercase_ ) @property def _lowerCamelCase ( self ) -> str: torch.manual_seed(0 ) snake_case = 12 snake_case = 12 snake_case = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } snake_case = TransformeraDModel(**lowercase_ ) return model def _lowerCamelCase ( self ) -> Tuple: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = 'cpu' snake_case = self.dummy_vqvae snake_case = self.dummy_text_encoder snake_case = self.dummy_tokenizer snake_case = self.dummy_transformer snake_case = VQDiffusionScheduler(self.num_embed ) snake_case = LearnedClassifierFreeSamplingEmbeddings( learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length ) snake_case = VQDiffusionPipeline( vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, ) snake_case = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case = 'teddy bear playing in the pool' snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' ) snake_case = output.images snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipe( [prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ) -> str: snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) snake_case = pipeline.to(lowercase_ ) pipeline.set_progress_bar_config(disable=lowercase_ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case = pipeline( 'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', ) snake_case = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
332
1
'''simple docstring''' def __magic_name__ ( A = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int: try: snake_case = int(A ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) snake_case = 1 snake_case = 2 while i * i <= n: while n % i == 0: snake_case = i n //= i i += 1 if n > 1: snake_case = n return int(A ) if __name__ == "__main__": print(f"{solution() = }")
332
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=__lowerCAmelCase ): snake_case_ = ['''note_seq'''] def __init__( self, *lowercase_, **lowercase_ ) -> str: requires_backends(self, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]: requires_backends(cls, ['note_seq'] ) @classmethod def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]: requires_backends(cls, ['note_seq'] )
332
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase ( __lowerCAmelCase ): def __init__( self, *lowercase_, **lowercase_ ) -> None: warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.', lowercase_, ) super().__init__(*lowercase_, **lowercase_ )
332
1
'''simple docstring''' from typing import Any def __magic_name__ ( A ) -> list[Any]: if not input_list: return [] snake_case = [input_list.count(A ) for value in input_list] snake_case = max(A ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(A ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
332
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def __magic_name__ ( ) -> Any: plt.scatter(A , A , color='red' ) plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
332
1
'''simple docstring''' def __magic_name__ ( A , A , A , A ) -> Any: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case = mf_knapsack(i - 1 , A , A , A ) else: snake_case = max( mf_knapsack(i - 1 , A , A , A ) , mf_knapsack(i - 1 , A , A , j - wt[i - 1] ) + val[i - 1] , ) snake_case = val return f[i][j] def __magic_name__ ( A , A , A , A ) -> Tuple: snake_case = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case = dp[i - 1][w_] return dp[n][w_], dp def __magic_name__ ( A , A , A ) -> List[Any]: if not (isinstance(A , (list, tuple) ) and isinstance(A , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case = len(A ) if num_items != len(A ): snake_case = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(A )} values''' ) raise ValueError(A ) for i in range(A ): if not isinstance(wt[i] , A ): snake_case = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(A ) snake_case , snake_case = knapsack(A , A , A , A ) snake_case = set() _construct_solution(A , A , A , A , A ) return optimal_val, example_optional_set def __magic_name__ ( A , A , A , A , A ) -> Optional[int]: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(A , A , i - 1 , A , A ) else: optimal_set.add(A ) _construct_solution(A , A , i - 1 , j - wt[i - 1] , A ) if __name__ == "__main__": lowerCAmelCase_ = [3, 2, 4, 4] lowerCAmelCase_ = [4, 3, 2, 3] lowerCAmelCase_ = 4 lowerCAmelCase_ = 6 lowerCAmelCase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCAmelCase_ , lowerCAmelCase_ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCAmelCase_ , lowerCAmelCase_ = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("optimal_value = ", optimal_solution) print("An optimal subset corresponding to the optimal value", optimal_subset)
332
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''''' snake_case_ = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) snake_case_ = None # compression type in fsspec. ex: "gzip" snake_case_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str: super().__init__(self, **lowercase_ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case = fsspec.open( lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed. }, **(target_options or {}), ) snake_case = os.path.basename(self.file.path.split('::' )[0] ) snake_case = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) snake_case = None @classmethod def _lowerCamelCase ( cls, lowercase_ ) -> Any: # compressed file paths are always relative to the archive root return super()._strip_protocol(lowercase_ ).lstrip('/' ) def _lowerCamelCase ( self ) -> Optional[Any]: if self.dir_cache is None: snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} snake_case = {f['name']: f} def _lowerCamelCase ( self, lowercase_ ) -> str: return self.file.open().read() def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any: snake_case = self._strip_protocol(lowercase_ ) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''bz2''' snake_case_ = '''bz2''' snake_case_ = '''.bz2''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''gzip''' snake_case_ = '''gzip''' snake_case_ = '''.gz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''lz4''' snake_case_ = '''lz4''' snake_case_ = '''.lz4''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''xz''' snake_case_ = '''xz''' snake_case_ = '''.xz''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''zstd''' snake_case_ = '''zstd''' snake_case_ = '''.zst''' def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]: super().__init__( fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case = self.file.__enter__ class lowerCamelCase : def __init__( self, lowercase_ ) -> List[Any]: snake_case = file_ def __enter__( self ) -> Dict: self._file.__enter__() return self def __exit__( self, *lowercase_, **lowercase_ ) -> Dict: self._file.__exit__(*lowercase_, **lowercase_ ) def __iter__( self ) -> List[str]: return iter(self._file ) def _lowerCamelCase ( self ) -> List[str]: return next(self._file ) def __getattr__( self, lowercase_ ) -> List[Any]: return getattr(self._file, lowercase_ ) def fixed_enter(*lowercase_, **lowercase_ ): return WrappedFile(_enter(*lowercase_, **lowercase_ ) ) snake_case = fixed_enter
332
1
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Any: snake_case = mock.Mock() snake_case = 500 snake_case = {} snake_case = HTTPError snake_case = {} # Download this model to make sure it's in the cache. snake_case = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request', return_value=__a ) as mock_head: snake_case = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _lowerCamelCase ( self ) -> List[str]: snake_case = mock.Mock() snake_case = 500 snake_case = {} snake_case = HTTPError snake_case = {} # Download this model to make sure it's in the cache. snake_case = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request', return_value=__a ) as mock_head: snake_case = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def _lowerCamelCase ( self ) -> str: # This test is for deprecated behavior and can be removed in v5 try: snake_case = tempfile.mktemp() with open(__a, 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', __a ) snake_case = AlbertTokenizer.from_pretrained(__a ) finally: os.remove(__a ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json', 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', __a ) snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove('tokenizer.json' ) def _lowerCamelCase ( self ) -> Any: snake_case = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class lowerCamelCase ( unittest.TestCase ): snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def _lowerCamelCase ( cls ) -> Dict: snake_case = TOKEN HfFolder.save_token(__a ) @classmethod def _lowerCamelCase ( cls ) -> Dict: try: delete_repo(token=cls._token, repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def _lowerCamelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: snake_case = os.path.join(__a, 'vocab.txt' ) with open(__a, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) snake_case = BertTokenizer(__a ) tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token ) snake_case = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__a, repo_id='test-tokenizer', push_to_hub=__a, use_auth_token=self._token ) snake_case = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) def _lowerCamelCase ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case = os.path.join(__a, 'vocab.txt' ) with open(__a, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) snake_case = BertTokenizer(__a ) tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token ) snake_case = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( __a, repo_id='valid_org/test-tokenizer-org', push_to_hub=__a, use_auth_token=self._token ) snake_case = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) @require_tokenizers def _lowerCamelCase ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case = os.path.join(__a, 'vocab.txt' ) with open(__a, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) snake_case = CustomTokenizer(__a ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token ) snake_case = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=__a ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case = os.path.join(__a, 'vocab.txt' ) with open(__a, 'w', encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) snake_case = BertTokenizerFast.from_pretrained(__a ) bert_tokenizer.save_pretrained(__a ) snake_case = CustomTokenizerFast.from_pretrained(__a ) tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token ) snake_case = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=__a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast' ) snake_case = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''', use_fast=__a, trust_remote_code=__a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer' ) class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def _lowerCamelCase ( self ) -> List[str]: snake_case = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ), ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ), ['[CLS]', ' This is a ', 'extra_id_100'] ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ), ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ), ['BC', 'A'] ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ), ['This is something ', '[SPECIAL_TOKEN]'] ) def _lowerCamelCase ( self ) -> List[Any]: snake_case = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ), ['This is something ', '[SPECIAL_TOKEN]'] ) def _lowerCamelCase ( self ) -> Union[str, Any]: snake_case = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ), ['AB', 'C'] ) def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ), ['ABC', 'D'] ) def _lowerCamelCase ( self ) -> Optional[int]: snake_case = Trie() snake_case = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3] ) self.assertEqual(__a, ['AB', 'C'] )
350
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A , A , A ) -> int | float: if len(A ) == 0: raise ValueError('find_max() arg is an empty sequence' ) if ( left >= len(A ) or left < -len(A ) or right >= len(A ) or right < -len(A ) ): raise IndexError('list index out of range' ) if left == right: return nums[left] snake_case = (left + right) >> 1 # the middle snake_case = find_max(A , A , A ) # find max in range[left, mid] snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
332
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase_ = { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json", } class lowerCamelCase ( A__ ): snake_case_ = "albert" def __init__( self, lowercase_=30000, lowercase_=128, lowercase_=4096, lowercase_=12, lowercase_=1, lowercase_=64, lowercase_=16384, lowercase_=1, lowercase_="gelu_new", lowercase_=0, lowercase_=0, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=0.1, lowercase_="absolute", lowercase_=0, lowercase_=2, lowercase_=3, **lowercase_, ) -> str: super().__init__(pad_token_id=__A, bos_token_id=__A, eos_token_id=__A, **__A ) snake_case = vocab_size snake_case = embedding_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_hidden_groups snake_case = num_attention_heads snake_case = inner_group_num snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = classifier_dropout_prob snake_case = position_embedding_type class lowerCamelCase ( A__ ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
351
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str: super().__init__() # pass init params to Encoder snake_case = Encoder( in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, ) snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case = nn.Convad(lowercase_, lowercase_, 1 ) snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ ) snake_case = nn.Convad(lowercase_, lowercase_, 1 ) # pass init params to Decoder snake_case = Decoder( in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput: snake_case = self.encoder(lowercase_ ) snake_case = self.quant_conv(lowercase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_ ) @apply_forward_hook def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case , snake_case , snake_case = self.quantize(lowercase_ ) else: snake_case = h snake_case = self.post_quant_conv(lowercase_ ) snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ ) def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: snake_case = sample snake_case = self.encode(lowercase_ ).latents snake_case = self.decode(lowercase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_ )
332
0
'''simple docstring''' def __magic_name__ ( A ) -> bool: return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("Program to check whether a number is a Perfect number or not...") lowerCAmelCase_ = int(input("Enter number: ").strip()) print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
352
'''simple docstring''' from __future__ import annotations from math import ceil, floor, sqrt def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int: snake_case = [0] snake_case = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target snake_case = 0 # the area corresponding to the grid that gives the product closest to target snake_case = 0 # an estimate of b, using the quadratic formula snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the largest integer less than b_estimate snake_case = 42 # the triangle number corresponding to b_floor snake_case = 42 # the triangle number corresponding to b_ceil snake_case = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 snake_case = floor(A ) snake_case = ceil(A ) snake_case = triangle_numbers[b_floor] snake_case = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_first_guess * triangle_a snake_case = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): snake_case = triangle_b_second_guess * triangle_a snake_case = idx_a * b_ceil return area if __name__ == "__main__": print(f"{solution() = }")
332
0
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCAmelCase_ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCAmelCase_ = logging.getLogger() def __magic_name__ ( ) -> Any: snake_case = argparse.ArgumentParser() parser.add_argument('-f' ) snake_case = parser.parse_args() return args.f def __magic_name__ ( UpperCamelCase_ , UpperCamelCase_="eval" ) -> Optional[Any]: snake_case = os.path.join(__lowerCAmelCase , F'''{split}_results.json''' ) if os.path.exists(__lowerCAmelCase ): with open(__lowerCAmelCase , 'r' ) as f: return json.load(__lowerCAmelCase ) raise ValueError(F'''can\'t find {path}''' ) lowerCAmelCase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCamelCase ( A__ ): def _lowerCamelCase ( self ) -> Dict: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_flax_glue.main() snake_case = get_results(__snake_case ) self.assertGreaterEqual(result['eval_accuracy'], 0.75 ) @slow def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_clm_flax.main() snake_case = get_results(__snake_case ) self.assertLess(result['eval_perplexity'], 100 ) @slow def _lowerCamelCase ( self ) -> Tuple: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_summarization_flax.main() snake_case = get_results(__snake_case, split='test' ) self.assertGreaterEqual(result['test_rouge1'], 10 ) self.assertGreaterEqual(result['test_rouge2'], 2 ) self.assertGreaterEqual(result['test_rougeL'], 7 ) self.assertGreaterEqual(result['test_rougeLsum'], 7 ) @slow def _lowerCamelCase ( self ) -> Optional[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_mlm_flax.main() snake_case = get_results(__snake_case ) self.assertLess(result['eval_perplexity'], 42 ) @slow def _lowerCamelCase ( self ) -> Any: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_ta_mlm_flax.main() snake_case = get_results(__snake_case ) self.assertGreaterEqual(result['eval_accuracy'], 0.42 ) @slow def _lowerCamelCase ( self ) -> Union[str, Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case = 7 if get_gpu_count() > 1 else 2 snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_flax_ner.main() snake_case = get_results(__snake_case ) self.assertGreaterEqual(result['eval_accuracy'], 0.75 ) self.assertGreaterEqual(result['eval_f1'], 0.3 ) @slow def _lowerCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F''' run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 '''.split() with patch.object(__snake_case, 'argv', __snake_case ): run_qa.main() snake_case = get_results(__snake_case ) self.assertGreaterEqual(result['eval_f1'], 30 ) self.assertGreaterEqual(result['eval_exact'], 30 )
353
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
332
0
'''simple docstring''' import heapq def __magic_name__ ( A ) -> set[int]: snake_case = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices snake_case = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices snake_case = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: snake_case = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
354
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) snake_case_ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case_ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) snake_case_ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) snake_case_ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case_ = field( default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) snake_case_ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) snake_case_ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = '''train''' snake_case_ = '''dev''' class lowerCamelCase ( __lowerCAmelCase ): snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int: snake_case = args snake_case = is_language_sensitive snake_case = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowercase_, lowercase_ ): try: snake_case = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) snake_case = mode # Load data features from cache or dataset file snake_case = 'v2' if args.version_2_with_negative else 'v1' snake_case = os.path.join( cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case = cached_features_file + '.lock' with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: snake_case = time.time() snake_case = torch.load(lowercase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case = self.old_features['features'] snake_case = self.old_features.get('dataset', lowercase_ ) snake_case = self.old_features.get('examples', lowercase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ' future run' ) else: if mode == Split.dev: snake_case = self.processor.get_dev_examples(args.data_dir ) else: snake_case = self.processor.get_train_examples(args.data_dir ) snake_case , snake_case = squad_convert_examples_to_features( examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, ) snake_case = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> Tuple: return len(self.features ) def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset snake_case = self.features[i] snake_case = torch.tensor(feature.input_ids, dtype=torch.long ) snake_case = torch.tensor(feature.attention_mask, dtype=torch.long ) snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long ) snake_case = torch.tensor(feature.cls_index, dtype=torch.long ) snake_case = torch.tensor(feature.p_mask, dtype=torch.float ) snake_case = torch.tensor(feature.is_impossible, dtype=torch.float ) snake_case = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case = torch.tensor(feature.start_position, dtype=torch.long ) snake_case = torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
332
0
from __future__ import annotations import math def __magic_name__ ( A ) -> Union[str, Any]: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_ = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)] def __magic_name__ ( A ) -> List[str]: if not isinstance(__a , __a ): raise ValueError('n must be an integer' ) if n <= 0: raise ValueError('n must be >= 0' ) snake_case = [] for num in range(len(__a ) ): snake_case = 0 while 2 * i * i <= odd_composites[num]: snake_case = odd_composites[num] - 2 * i * i if is_prime(__a ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__a ) == n: return list_nums return [] def __magic_name__ ( ) -> Any: return compute_nums(1 )[0] if __name__ == "__main__": print(f"{solution() = }")
355
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( A , A , A ) -> Any: # Initialise PyTorch model snake_case = BertConfig.from_json_file(A ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case = BertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_bert(A , A , A ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
332
0
'''simple docstring''' def __magic_name__ ( A , A ) -> str: if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) snake_case = str(bin(__snake_case ) )[2:] # remove the leading "0b" snake_case = str(bin(__snake_case ) )[2:] # remove the leading "0b" snake_case = max(len(__snake_case ) , len(__snake_case ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
356
'''simple docstring''' from __future__ import annotations def __magic_name__ ( A ) -> list: if len(A ) == 0: return [] snake_case , snake_case = min(A ), max(A ) snake_case = int(max_value - min_value ) + 1 snake_case = [[] for _ in range(A )] for i in my_list: buckets[int(i - min_value )].append(A ) return [v for bucket in buckets for v in sorted(A )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
332
0
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __magic_name__ ( A , A ) -> float: return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__A , __A ) ) ) def __magic_name__ ( A , A ) -> list[list[list[float] | float]]: if dataset.ndim != value_array.ndim: snake_case = ( 'Wrong input data\'s dimensions... ' F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(__A ) try: if dataset.shape[1] != value_array.shape[1]: snake_case = ( 'Wrong input data\'s shape... ' F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(__A ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: snake_case = ( 'Input data have different datatype... ' F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(__A ) snake_case = [] for value in value_array: snake_case = euclidean(__A , dataset[0] ) snake_case = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case = euclidean(__A , __A ) if dist > temp_dist: snake_case = temp_dist snake_case = dataset_value.tolist() answer.append([vector, dist] ) return answer def __magic_name__ ( A , A ) -> float: return np.dot(__A , __A ) / (norm(__A ) * norm(__A )) if __name__ == "__main__": import doctest doctest.testmod()
357
'''simple docstring''' def __magic_name__ ( A ) -> float: return 1_0 - x * x def __magic_name__ ( A , A ) -> float: # Bolzano theory in order to find if there is a root between a and b if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) snake_case = a while (b - a) >= 0.01: # Find middle point snake_case = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: snake_case = c else: snake_case = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
332
0
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __magic_name__ ( A , A , A , A , A=True , A="pt" ) -> Any: snake_case = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(' ' ) else {} snake_case = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __magic_name__ ( A , A , A=None , ) -> str: snake_case = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class lowerCamelCase ( lowercase__ ): def __init__( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_="train", lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="", ) -> int: super().__init__() snake_case = Path(_UpperCamelCase ).joinpath(type_path + '.source' ) snake_case = Path(_UpperCamelCase ).joinpath(type_path + '.target' ) snake_case = self.get_char_lens(self.src_file ) snake_case = max_source_length snake_case = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case = tokenizer snake_case = prefix if n_obs is not None: snake_case = self.src_lens[:n_obs] snake_case = src_lang snake_case = tgt_lang def __len__( self ) -> Optional[Any]: return len(self.src_lens ) def __getitem__( self, lowercase_ ) -> List[str]: snake_case = index + 1 # linecache starts at 1 snake_case = self.prefix + linecache.getline(str(self.src_file ), _UpperCamelCase ).rstrip('\n' ) snake_case = linecache.getline(str(self.tgt_file ), _UpperCamelCase ).rstrip('\n' ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer, _UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, _UpperCamelCase ) else self.tokenizer ) snake_case = self.tokenizer.generator if isinstance(self.tokenizer, _UpperCamelCase ) else self.tokenizer snake_case = encode_line(_UpperCamelCase, _UpperCamelCase, self.max_source_length, 'right' ) snake_case = encode_line(_UpperCamelCase, _UpperCamelCase, self.max_target_length, 'right' ) snake_case = source_inputs["""input_ids"""].squeeze() snake_case = target_inputs["""input_ids"""].squeeze() snake_case = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _lowerCamelCase ( lowercase_ ) -> Optional[Any]: return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def _lowerCamelCase ( self, lowercase_ ) -> int: snake_case = torch.stack([x['input_ids'] for x in batch] ) snake_case = torch.stack([x['attention_mask'] for x in batch] ) snake_case = torch.stack([x['decoder_input_ids'] for x in batch] ) snake_case = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, _UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, _UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case = trim_batch(_UpperCamelCase, _UpperCamelCase ) snake_case = trim_batch(_UpperCamelCase, _UpperCamelCase, attention_mask=_UpperCamelCase ) snake_case = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch lowerCAmelCase_ = getLogger(__name__) def __magic_name__ ( A ) -> Tuple: return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __magic_name__ ( A ) -> str: snake_case = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'git_log.json' ) ) def __magic_name__ ( A , A , A=4 , **A ) -> int: with open(lowerCamelCase_ , 'w' ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __magic_name__ ( A ) -> Dict: with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __magic_name__ ( ) -> List[Any]: snake_case = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __magic_name__ ( A , A ) -> str: return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __magic_name__ ( A , A ) -> Tuple: with open(lowerCamelCase_ , 'wb' ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __magic_name__ ( A ) -> str: def remove_articles(A ): return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase_ ) def white_space_fix(A ): return " ".join(text.split() ) def remove_punc(A ): snake_case = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(A ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __magic_name__ ( A , A ) -> int: snake_case = normalize_answer(lowerCamelCase_ ).split() snake_case = normalize_answer(lowerCamelCase_ ).split() snake_case = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case = sum(common.values() ) if num_same == 0: return 0 snake_case = 1.0 * num_same / len(lowerCamelCase_ ) snake_case = 1.0 * num_same / len(lowerCamelCase_ ) snake_case = (2 * precision * recall) / (precision + recall) return fa def __magic_name__ ( A , A ) -> Optional[int]: return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __magic_name__ ( A , A ) -> Any: assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __magic_name__ ( A ) -> List[Any]: return model_prefix.startswith('rag' ) def __magic_name__ ( A , A , A ) -> Optional[Any]: snake_case = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
358
'''simple docstring''' import pytest lowerCAmelCase_ = "__dummy_dataset1__" lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def __magic_name__ ( ) -> List[Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __magic_name__ ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __magic_name__ ( A , A , A ) -> Optional[int]: snake_case = dataset_loading_script_name snake_case = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=A ) snake_case = script_dir / F'''{script_name}.py''' with open(A , 'w' ) as f: f.write(A ) return str(A )
332
0