Dataset schema (one row = five fields):

    code                     string  (lengths 87 to 55.2k)
    code_codestyle           int64   (0 to 349)
    style_context            string  (lengths 135 to 49.1k)
    style_context_codestyle  int64   (0 to 349)
    label                    int64   (0 to 1)
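For orientation, a minimal sketch of loading a dataset with this schema through the `datasets` library; the repo id below is a placeholder, since the actual dataset name is not part of this dump.

from datasets import load_dataset  # pip install datasets

ds = load_dataset("org/code-style-pairs", split="train")  # placeholder repo id

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # obfuscated source + style id
print(len(row["style_context"]), row["style_context_codestyle"])  # paired context + its style id
print(row["label"])                                               # binary label, 0 or 1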
"""
Project Euler 99 (https://projecteuler.net/problem=99): determine which line
of base_exp.txt holds the base/exponent pair with the greatest numerical value.
"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        # Each line holds "base,exponent"; compare via logarithms (see the note below).
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
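The sample relies on the standard big-exponent trick: a**x is far too large to materialize, but log10 is monotonic, so comparing x * log10(a) preserves the ordering while staying a small float. A quick check on small numbers:

from math import log10

# 2**11 = 2048 and 3**7 = 2187; the log comparison must agree with the direct one.
assert (2**11 > 3**7) == (11 * log10(2) > 7 * log10(3))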
code_codestyle: 101
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        # Pad the front with N placeholder slots; leaves live at indices N..2N-1.
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        # Fill internal nodes bottom-up from their two children.
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        # Point update: overwrite the leaf, then recompute every ancestor.
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        # Inclusive range query via iterative bottom-up traversal.
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1}

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        # Compare every query against a brute-force reduce over the same slice.
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
style_context_codestyle: 22
label: 0
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None: """simple docstring""" try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''' ) __snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}""" print(f"""Converting {dataset}-{pair}""" ) __snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case ) if save_dir is None: __snake_case : int = f"""{dataset}-{pair}""" __snake_case : Union[str, Any] = Path(_snake_case ) save_dir.mkdir(exist_ok=_snake_case ) for split in ds.keys(): print(f"""Splitting {split} with {ds[split].num_rows} records""" ) # to save to val.source, val.target like summary datasets __snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split __snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" ) __snake_case : int = save_dir.joinpath(f"""{fn}.target""" ) __snake_case : Union[str, Any] = src_path.open('''w+''' ) __snake_case : Union[str, Any] = tgt_path.open('''w+''' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): __snake_case : List[str] = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''' ) tgt_fp.write(ex[tgt_lang] + '''\n''' ) print(f"""Saved {dataset} dataset to {save_dir}""" ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
code_codestyle: 102
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
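The expected values in the first test pin down the splitting rule: at most max_num_jobs contiguous ranges, with any remainder spread over the earliest ranges. A sketch re-deriving that rule; this illustrates the behavior the tests encode, not the library's actual implementation:

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    # Contiguous ranges, larger ones first, matching the expected values above.
    num_jobs = min(num_shards, max_num_jobs)
    sizes = [num_shards // num_jobs + int(i < num_shards % num_jobs) for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(start, start + size) for start, size in zip(starts, sizes)]


assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(0, 1) == []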
style_context_codestyle: 22
label: 0
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __snake_case ( UpperCamelCase_ ): _a = ['''vqvae'''] def __init__( self : Tuple , A_ : AutoencoderKL , A_ : UNetaDConditionModel , A_ : Mel , A_ : Union[DDIMScheduler, DDPMScheduler] , ): super().__init__() self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_) def UpperCAmelCase__ ( self : Any): return 5_0 if isinstance(self.scheduler , A_) else 1_0_0_0 @torch.no_grad() def __call__( self : Any , A_ : int = 1 , A_ : str = None , A_ : np.ndarray = None , A_ : int = 0 , A_ : int = 0 , A_ : int = None , A_ : torch.Generator = None , A_ : float = 0 , A_ : float = 0 , A_ : torch.Generator = None , A_ : float = 0 , A_ : torch.Tensor = None , A_ : torch.Tensor = None , A_ : int=True , ): lowerCAmelCase_ : Union[str, Any] = steps or self.get_default_steps() self.scheduler.set_timesteps(A_) lowerCAmelCase_ : int = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size) == int: lowerCAmelCase_ : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase_ : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=A_ , device=self.device , ) lowerCAmelCase_ : Tuple = noise lowerCAmelCase_ : Union[str, Any] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(A_ , A_) lowerCAmelCase_ : Optional[int] = self.mel.audio_slice_to_image(A_) lowerCAmelCase_ : int = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape( (input_image.height, input_image.width)) lowerCAmelCase_ : str = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase_ : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device) if self.vqvae is not None: lowerCAmelCase_ : Optional[Any] = self.vqvae.encode(torch.unsqueeze(A_ , 0)).latent_dist.sample( generator=A_)[0] lowerCAmelCase_ : Optional[int] = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase_ : List[str] = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1]) lowerCAmelCase_ : Union[str, Any] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase_ : List[Any] = int(mask_start_secs * pixels_per_second) lowerCAmelCase_ : Optional[Any] = int(mask_end_secs * pixels_per_second) lowerCAmelCase_ : str = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:])) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): if isinstance(self.unet , A_): lowerCAmelCase_ : Optional[Any] = self.unet(A_ , A_ , A_)['''sample'''] else: lowerCAmelCase_ : Dict = self.unet(A_ , A_)['''sample'''] if isinstance(self.scheduler , A_): lowerCAmelCase_ : List[Any] = self.scheduler.step( model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample'''] else: lowerCAmelCase_ : int = self.scheduler.step( model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample'''] if mask is not None: if mask_start > 0: lowerCAmelCase_ : Optional[Any] = mask[:, step, :, :mask_start] 
if mask_end > 0: lowerCAmelCase_ : Any = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase_ : Tuple = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase_ : Tuple = self.vqvae.decode(A_)['''sample'''] lowerCAmelCase_ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1) lowerCAmelCase_ : List[str] = images.cpu().permute(0 , 2 , 3 , 1).numpy() lowerCAmelCase_ : Union[str, Any] = (images * 2_5_5).round().astype('''uint8''') lowerCAmelCase_ : Optional[Any] = list( (Image.fromarray(_[:, :, 0]) for _ in images) if images.shape[3] == 1 else (Image.fromarray(A_ , mode='''RGB''').convert('''L''') for _ in images)) lowerCAmelCase_ : Optional[Any] = [self.mel.image_to_audio(A_) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(A_)[:, np.newaxis, :]) , **ImagePipelineOutput(A_)) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , A_ : List[Image.Image] , A_ : int = 5_0): assert isinstance(self.scheduler , A_) self.scheduler.set_timesteps(A_) lowerCAmelCase_ : int = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images]) lowerCAmelCase_ : Any = (sample / 2_5_5) * 2 - 1 lowerCAmelCase_ : List[str] = torch.Tensor(A_).to(self.device) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))): lowerCAmelCase_ : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase_ : Tuple = self.scheduler.alphas_cumprod[t] lowerCAmelCase_ : Optional[int] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase_ : int = 1 - alpha_prod_t lowerCAmelCase_ : Optional[Any] = self.unet(A_ , A_)['''sample'''] lowerCAmelCase_ : Optional[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase_ : Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase_ : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCAmelCase__ ( A_ : torch.Tensor , A_ : torch.Tensor , A_ : float): lowerCAmelCase_ : Dict = acos(torch.dot(torch.flatten(A_) , torch.flatten(A_)) / torch.norm(A_) / torch.norm(A_)) return sin((1 - alpha) * theta) * xa / sin(A_) + sin(alpha * theta) * xa / sin(A_)
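The static method closing this sample is spherical linear interpolation (slerp) over flattened tensors. The same formula with descriptive names (the names are reconstructions; the sample's identifiers are mangled):

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two tensors viewed as flat vectors.
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    # Interpolate along the great circle: alpha=0 gives x0, alpha=1 gives x1.
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)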
code_codestyle: 103
import math


def perfect_square(num: int) -> bool:
    # Floating-point check: fast, but see the caveat below for large ints.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Exact integer check via binary search over candidate roots.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
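Why the binary-search variant exists: above 2**53 an int no longer fits exactly in a double, so the float check can misreport genuine perfect squares, while integer binary search stays exact. An illustration (result assumed for a typical IEEE-754 build; verify locally):

import math

n = (10**8 + 1) ** 2                     # a true perfect square larger than 2**53
print(math.sqrt(n) * math.sqrt(n) == n)  # False here: the float product rounds to an even neighbor
print(perfect_square_binary_search(n))   # True: exact integer arithmetic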
style_context_codestyle: 22
label: 0
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''') # TF training parameters lowerCAmelCase__ = False lowerCAmelCase__ = False def _A ( A__ ): """simple docstring""" return TrainCommand(A__ ) class lowercase_ (lowerCamelCase__ ): """simple docstring""" @staticmethod def SCREAMING_SNAKE_CASE ( lowercase__ : ArgumentParser ): __lowercase = parser.add_parser('''train''' ,help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' ,type=lowercase__ ,required=lowercase__ ,help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' ,) train_parser.add_argument( '''--column_label''' ,type=lowercase__ ,default=0 ,help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' ,type=lowercase__ ,default=1 ,help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' ,type=lowercase__ ,default=2 ,help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' ,action='''store_true''' ,help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' ,type=lowercase__ ,default='''''' ,help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' ,type=lowercase__ ,default=0.1 ,help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' ,) train_parser.add_argument('''--output''' ,type=lowercase__ ,default='''./''' ,help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' ,type=lowercase__ ,default='''text_classification''' ,help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' ,type=lowercase__ ,default='''bert-base-uncased''' ,help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' ,type=lowercase__ ,default=3_2 ,help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' ,type=lowercase__ ,default=6_4 ,help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' ,type=lowercase__ ,default=3e-5 ,help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' ,type=lowercase__ ,default=1e-0_8 ,help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=lowercase__ ) def __init__( self : int ,lowercase__ : Namespace ): __lowercase = logging.get_logger('''transformers-cli/training''' ) __lowercase = '''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output ,exist_ok=lowercase__ ) __lowercase = args.output __lowercase = args.column_label __lowercase = args.column_text __lowercase = args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": __lowercase = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from 
{args.train_data}" ) __lowercase = Processor.create_from_csv( args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) __lowercase = None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) __lowercase = Processor.create_from_csv( args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) __lowercase = args.validation_split __lowercase = args.train_batch_size __lowercase = args.valid_batch_size __lowercase = args.learning_rate __lowercase = args.adam_epsilon def SCREAMING_SNAKE_CASE ( self : Any ): if self.framework == "tf": return self.run_tf() return self.run_torch() def SCREAMING_SNAKE_CASE ( self : int ): raise NotImplementedError def SCREAMING_SNAKE_CASE ( self : Tuple ): self.pipeline.fit( self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,) # Save trained pipeline self.pipeline.save_pretrained(self.output )
code_codestyle: 104
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __SCREAMING_SNAKE_CASE :Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ): _UpperCAmelCase = d_model _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length _UpperCAmelCase = cardinality _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence _UpperCAmelCase = embedding_dimension _UpperCAmelCase = is_training _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = context_length _UpperCAmelCase = prediction_length + label_length _UpperCAmelCase = label_length _UpperCAmelCase = moving_average _UpperCAmelCase = autocorrelation_factor def lowercase ( self : Union[str, Any] ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase ( self : int , snake_case_ : Optional[Any] ): _UpperCAmelCase = config.context_length + max(config.lags_sequence ) _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) _UpperCAmelCase = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def lowercase ( self : List[Any] ): _UpperCAmelCase = self.get_config() _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ ) return config, inputs_dict def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ): _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval() _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = outputs.encoder_last_hidden_state _UpperCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_encoder() encoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _UpperCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _UpperCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _UpperCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _UpperCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _UpperCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_decoder() decoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else () _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Tuple = False _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[Any] = False def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, 
Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ ) @unittest.skip(reason="Model has no tokens embeddings" ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : Optional[int] ): _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) ) # The main input is the name of the argument after `self` _UpperCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ ) _UpperCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_ ) # decoder attentions _UpperCAmelCase = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _UpperCAmelCase = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 2 , len(snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase ( self : Dict ): super().test_retain_grad_hidden_states_attentions() def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" ) _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) return batch @require_torch @slow class A_ ( unittest.TestCase ): def lowercase ( self : Optional[int] ): _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch() with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] _UpperCAmelCase = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , 
static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ ) _UpperCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
style_context_codestyle: 22
label: 0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : Optional[Any] = { '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''', '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''', } class __UpperCamelCase ( a__ ): lowerCamelCase : Optional[int] ="""falcon""" lowerCamelCase : Union[str, Any] =["""past_key_values"""] def __init__( self , lowerCAmelCase__=6_5024 , lowerCAmelCase__=4544 , lowerCAmelCase__=32 , lowerCAmelCase__=71 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=11 , lowerCAmelCase__=11 , **lowerCAmelCase__ , ) -> Any: a : Optional[int] = vocab_size # Backward compatibility with n_embed kwarg a : Tuple = kwargs.pop("n_embed" , lowerCAmelCase__ ) a : int = hidden_size if n_embed is None else n_embed a : Dict = num_hidden_layers a : Any = num_attention_heads a : List[str] = layer_norm_epsilon a : Any = initializer_range a : Dict = use_cache a : Optional[int] = hidden_dropout a : Optional[int] = attention_dropout a : Optional[int] = bos_token_id a : List[Any] = eos_token_id a : str = num_attention_heads if num_kv_heads is None else num_kv_heads a : Union[str, Any] = alibi a : Any = new_decoder_architecture a : Dict = multi_query # Ignored when new_decoder_architecture is True a : List[Any] = parallel_attn a : List[str] = bias super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __a ( self ) -> Optional[int]: return self.hidden_size // self.num_attention_heads @property def __a ( self ) -> Dict: return not self.alibi
code_codestyle: 105
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class A_ : _lowerCamelCase : str _lowerCamelCase : str = None @staticmethod def lowercase ( ): raise NotImplementedError def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ): raise NotImplementedError def lowercase ( self : Any , snake_case_ : int ): raise NotImplementedError def lowercase ( self : List[str] ): if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase ( cls : List[Any] ): return f'`pip install {cls.pip_package or cls.name}`' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """optuna""" @staticmethod def lowercase ( ): return is_optuna_available() def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ): return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : int , snake_case_ : Optional[int] ): return default_hp_space_optuna(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """ray""" _lowerCamelCase : Tuple = """'ray[tune]'""" @staticmethod def lowercase ( ): return is_ray_available() def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ): return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : str ): return default_hp_space_ray(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """sigopt""" @staticmethod def lowercase ( ): return is_sigopt_available() def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ): return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] ): return default_hp_space_sigopt(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """wandb""" @staticmethod def lowercase ( ): return is_wandb_available() def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ): return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : Union[str, Any] ): return default_hp_space_wandb(snake_case_ ) __SCREAMING_SNAKE_CASE :Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase_ ( ) -> str: '''simple docstring''' _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowercase ) > 0: _UpperCAmelCase = available_backends[0].name if len(__lowercase ) > 1: logger.info( f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
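The selection logic just above reduces to "take the first installed backend; otherwise fail with install hints". The same pattern in isolation, with a hypothetical registry mapping backend names to pip packages:

import importlib.util

BACKENDS = {"optuna": "optuna", "ray": "ray[tune]", "sigopt": "sigopt", "wandb": "wandb"}


def default_backend() -> str:
    # First installed backend wins, mirroring the function above.
    available = [name for name in BACKENDS if importlib.util.find_spec(name) is not None]
    if available:
        return available[0]
    hints = "\n".join(f" - To install {name} run `pip install {BACKENDS[name]}`" for name in BACKENDS)
    raise RuntimeError("No hyperparameter search backend available.\n" + hints)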
style_context_codestyle: 22
label: 0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : List[Any] ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : str ): lowerCAmelCase__ : int = dataset lowerCAmelCase__ : List[str] = process lowerCAmelCase__ : Dict = params def __len__( self : Any ): return len(self.dataset ) def __getitem__( self : Union[str, Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Union[str, Any] = self.dataset[i] lowerCAmelCase__ : Optional[Any] = self.process(lowercase_ ,**self.params ) return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Optional[int] ,lowercase_ : Optional[Any] ,lowercase_ : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Tuple=None ): lowerCAmelCase__ : List[Any] = loader lowerCAmelCase__ : int = infer lowerCAmelCase__ : List[str] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCAmelCase__ : int = None lowerCAmelCase__ : Dict = loader_batch_size # Internal bookkeeping lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Optional[int] = None def __len__( self : Union[str, Any] ): return len(self.loader ) def __iter__( self : List[Any] ): lowerCAmelCase__ : List[Any] = iter(self.loader ) return self def __lowerCAmelCase ( self : Tuple ): if isinstance(self._loader_batch_data ,torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCAmelCase__ : Tuple = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCAmelCase__ : int = {} for k, element in self._loader_batch_data.items(): if isinstance(lowercase_ ,lowercase_ ): # Convert ModelOutput to tuple first lowerCAmelCase__ : List[Any] = element.to_tuple() if isinstance(element[0] ,torch.Tensor ): lowerCAmelCase__ : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowerCAmelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase_ ,lowercase_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] ,torch.Tensor ): lowerCAmelCase__ : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowerCAmelCase__ : Optional[int] = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCAmelCase__ : Dict = None elif isinstance(element[self._loader_batch_index] ,torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase__ : str = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] ,np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase__ : Tuple = np.expand_dims(element[self._loader_batch_index] ,0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCAmelCase__ : int = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCAmelCase__ : int = self._loader_batch_data.__class__(lowercase_ ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self : Optional[int] ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCAmelCase__ : Dict = next(self.iterator ) lowerCAmelCase__ : List[Any] = self.infer(lowercase_ ,**self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowercase_ ,torch.Tensor ): lowerCAmelCase__ : int = processed else: lowerCAmelCase__ : Union[str, Any] = list(processed.keys() )[0] lowerCAmelCase__ : Union[str, Any] = processed[key] if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : List[Any] = len(lowercase_ ) else: lowerCAmelCase__ : List[str] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCAmelCase__ : Optional[Any] = observed_batch_size # Setting internal index to unwrap the batch lowerCAmelCase__ : str = processed lowerCAmelCase__ : Any = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : int ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Union[str, Any] ,lowercase_ : int=None ): super().__init__(lowercase_ ,lowercase_ ,lowercase_ ) def __iter__( self : List[Any] ): lowerCAmelCase__ : Dict = iter(self.loader ) lowerCAmelCase__ : Tuple = None return self def __lowerCAmelCase ( self : Optional[int] ): if self.subiterator is None: lowerCAmelCase__ : List[Any] = self.infer(next(self.iterator ) ,**self.params ) try: # Try to return next item lowerCAmelCase__ : Optional[int] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params ) lowerCAmelCase__ : int = next(self.subiterator ) return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __iter__( self : Tuple ): lowerCAmelCase__ : int = iter(self.loader ) return self def __lowerCAmelCase ( self : List[Any] ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : str = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase__ : Dict = self.loader_batch_item() lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' ) accumulator.append(lowercase_ ) if is_last: return accumulator while not is_last: lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params ) if self.loader_batch_size is not None: if isinstance(lowercase_ ,torch.Tensor ): lowerCAmelCase__ : Tuple = processed else: lowerCAmelCase__ : List[Any] = list(processed.keys() )[0] lowerCAmelCase__ : Union[str, Any] = processed[key] if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : Tuple = len(lowercase_ ) else: lowerCAmelCase__ : str = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCAmelCase__ : Optional[int] = observed_batch_size lowerCAmelCase__ : Optional[int] = processed lowerCAmelCase__ : Optional[int] = 0 while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase__ : Any = self.loader_batch_item() lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' ) accumulator.append(lowercase_ ) if is_last: return accumulator else: lowerCAmelCase__ : Dict = processed lowerCAmelCase__ : Tuple = item.pop('''is_last''' ) accumulator.append(lowercase_ ) return accumulator class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : int ,lowercase_ : Dataset ,lowercase_ : str ): lowerCAmelCase__ : List[Any] = dataset lowerCAmelCase__ : List[Any] = key def __len__( self : List[Any] ): return len(self.dataset ) def __getitem__( self : str ,lowercase_ : Union[str, Any] ): return self.dataset[i][self.key] class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Dict ,lowercase_ : Dataset ,lowercase_ : str ,lowercase_ : str ): lowerCAmelCase__ : str = dataset lowerCAmelCase__ : List[str] = keya lowerCAmelCase__ : Optional[Any] = keya def __len__( self : str ): return len(self.dataset ) def __getitem__( self : Optional[int] ,lowercase_ : Union[str, Any] ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
code_codestyle: 106
'''simple docstring''' __SCREAMING_SNAKE_CASE :List[str] = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, 
KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
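This whole file is variations on one import-guard pattern: probe the soft dependency, and fall back to dummy placeholder objects so the top-level import always succeeds. A distilled sketch of the pattern; the placeholder factory is my own illustration (diffusers generates its dummy objects from templates), and the else branch assumes diffusers itself is installed.

import importlib.util


class OptionalDependencyNotAvailable(BaseException):
    """Raised internally when a soft dependency is missing."""


def make_placeholder(name: str, requirement: str):
    # Stand-in class whose instantiation explains what to install.
    class _Placeholder:
        def __init__(self, *args, **kwargs):
            raise ImportError(f"{name} requires {requirement}; run `pip install {requirement}`.")

    _Placeholder.__name__ = name
    return _Placeholder


try:
    if importlib.util.find_spec("torch") is None:
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    AutoencoderKL = make_placeholder("AutoencoderKL", "torch")  # import still succeeds
else:
    from diffusers.models import AutoencoderKL  # the real class when torch is present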
style_context_codestyle: 22
label: 0
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __magic_name__ ( A : Tuple, A : List[Any], A : List[Any], A : Dict ): '''simple docstring''' for param, grad_param in zip(model_a.parameters(), model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def __magic_name__ ( A : List[Any], A : int, A : Optional[Any], A : Optional[int], A : Any=True ): '''simple docstring''' model.train() a = model(A ) a = F.mse_loss(A, target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(A ) def __magic_name__ ( A : Any, A : Any=False ): '''simple docstring''' set_seed(42 ) a = RegressionModel() a = deepcopy(A ) a = RegressionDataset(length=80 ) a = DataLoader(A, batch_size=16 ) model.to(accelerator.device ) if sched: a = AdamW(params=model.parameters(), lr=1E-3 ) a = AdamW(params=ddp_model.parameters(), lr=1E-3 ) a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 ) a = LambdaLR(A, lr_lambda=lambda A : epoch**0.65 ) # Make a copy of `model` if sched: a , a , a , a = accelerator.prepare(A, A, A, A ) else: a , a = accelerator.prepare(A, A ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __magic_name__ ( A : List[Any] ): '''simple docstring''' a , a , a = get_training_setup(A ) # Use a single batch a , a = next(iter(A ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a = accelerator.gather((ddp_input, ddp_target) ) a , a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A, A, A, A ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(A ): step_model(A, A, A, A ) else: # Sync grads step_model(A, A, A, A ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(A, A, A, A ) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad, ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a = ddp_input[torch.randperm(len(A ) )] def __magic_name__ ( A : Optional[int] ): '''simple docstring''' a , a , a = get_training_setup(A ) # Use a single batch a , a = next(iter(A ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a , a = accelerator.gather((ddp_input, ddp_target) ) a , a = 
input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A, A, A, A ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(A ): step_model(A, A, A, A ) else: # Sync grads step_model(A, A, A, A ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a = ddp_input[torch.randperm(len(A ) )] def __magic_name__ ( A : List[Any]=False, A : List[Any]=False ): '''simple docstring''' a = Accelerator( split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a = get_training_setup(A ) for iteration, batch in enumerate(A ): a , a = batch.values() # Gather the distributed inputs and targs for the base model a , a = accelerator.gather((ddp_input, ddp_target) ) a , a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A, A, A, A, A ) # Do "gradient accumulation" (noop) with accelerator.accumulate(A ): step_model(A, A, A, A ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) a = ddp_input[torch.randperm(len(A ) )] GradientState._reset_state() def __magic_name__ ( A : List[Any]=False, A : Any=False ): '''simple docstring''' a = Accelerator( split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 ) # Test that context manager behaves properly a , a , a , a , a , a , a = get_training_setup(A, A ) for iteration, batch in enumerate(A ): a , a = batch.values() # Gather the distributed inputs and targs for the base model a , a = accelerator.gather((ddp_input, ddp_target) ) a , a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(A, A, A, A, A ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(A ): step_model(A, A, A, A ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates 
should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A )) if accelerator.num_processes > 1: check_model_parameters(A, A, A, A ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __magic_name__ ( ): '''simple docstring''' a = Accelerator() a = RegressionDataset(length=80 ) a = DataLoader(A, batch_size=16 ) a = RegressionDataset(length=96 ) a = DataLoader(A, batch_size=16 ) a , a = accelerator.prepare(A, A ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(A ): assert id(accelerator.gradient_state.active_dataloader ) == id(A ) if iteration < len(A ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(A ): assert id(accelerator.gradient_state.active_dataloader ) == id(A ) if batch_num < len(A ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __magic_name__ ( ): '''simple docstring''' a = Accelerator() a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(A ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(A ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation(A, A ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation_with_opt_and_scheduler(A, A ) def __magic_name__ ( A : Optional[int] ): '''simple docstring''' main() if __name__ == "__main__": main()
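# --- Hedged sketch (not part of the test file above). The public pattern those tests exercise
# is the `accelerator.accumulate(...)` context manager: gradients are synced, and the optimizer
# should step, only every `gradient_accumulation_steps` batches. Model/data sizes are illustrative.
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # skips the gradient sync on accumulation-only steps
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)       # also rescales the loss by the accumulation factor
        optimizer.step()
        optimizer.zero_grad()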
107
"""Split model predictions into one sentence per line (used for rougeLsum-style scoring)."""
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (and keep the result; the original discarded it)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
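# --- Usage example for the splitter above (input string is illustrative): the "<n>" markers
# that Pegasus emits are stripped first, then each NLTK-detected sentence goes on its own line.
print(add_newline_to_end_of_each_sentence("One sentence.<n> Another sentence. And a third."))
# One sentence.
# Another sentence.
# And a third.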
22
0
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 1_0_0_0 , SCREAMING_SNAKE_CASE : bool = True ): '''simple docstring''' assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" ) return min_val if option else max_val def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return int((number_a + number_a) / 2 ) def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' assert ( isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ), 'argument values must be type of "int"' if lower > higher: raise ValueError("argument value for lower and higher must be(lower > higher)" ) if not lower < to_guess < higher: raise ValueError( "guess value must be within the range of lower and higher value" ) def answer(SCREAMING_SNAKE_CASE : int ) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print("started..." ) lowerCAmelCase : List[str] = lower lowerCAmelCase : Tuple = higher lowerCAmelCase : Optional[Any] = [] while True: lowerCAmelCase : int = get_avg(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) last_numbers.append(SCREAMING_SNAKE_CASE ) if answer(SCREAMING_SNAKE_CASE ) == "low": lowerCAmelCase : Dict = number elif answer(SCREAMING_SNAKE_CASE ) == "high": lowerCAmelCase : int = number else: break print(f"""guess the number : {last_numbers[-1]}""" ) print(f"""details : {last_numbers!s}""" ) def a__ ( ): '''simple docstring''' lowerCAmelCase : List[Any] = int(input("Enter lower value : " ).strip() ) lowerCAmelCase : Optional[int] = int(input("Enter high value : " ).strip() ) lowerCAmelCase : Optional[int] = int(input("Enter value to guess : " ).strip() ) guess_the_number(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
108
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A_ : def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : List[Any] ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) 
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) comm_check_on_output(snake_case_ ) _UpperCAmelCase = model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowerCamelCase : Tuple = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = False def lowercase ( self : Optional[int] ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def lowercase ( self : Any ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def lowercase ( self : List[str] ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def lowercase ( self : 
List[str] ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def lowercase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowercase ( self : Any ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def lowercase ( self : Optional[int] ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ), "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ), "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def lowercase ( self : int ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE :Dict = 1e-4 def UpperCAmelCase_ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class A_ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def lowercase ( self : List[Any] ): _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( 
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : int ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : List[Any] ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , ) _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ ) _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]] _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]] with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
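# --- Hedged inference sketch for the model the tests above exercise, using the public
# `transformers` API; the checkpoint and the fixture image path are illustrative.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Collapse the query logits into a per-pixel class map at the original (H, W) resolution:
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]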
22
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging A: Any = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : str = ['pixel_values'] def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ) -> None: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = size if size is not None else {"""shortest_edge""": 224} UpperCAmelCase : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) UpperCAmelCase : Tuple = do_resize UpperCAmelCase : Dict = size UpperCAmelCase : List[Any] = resample UpperCAmelCase : Union[str, Any] = do_center_crop UpperCAmelCase : List[str] = crop_size UpperCAmelCase : List[Any] = do_rescale UpperCAmelCase : int = rescale_factor UpperCAmelCase : Optional[int] = do_normalize UpperCAmelCase : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase : List[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase : int = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase : int = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=_SCREAMING_SNAKE_CASE ) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: '''simple docstring''' return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> np.ndarray: '''simple docstring''' return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: '''simple docstring''' UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : Tuple = size if size is not None else self.size UpperCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""size""" , default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = resample if resample is not None else self.resample UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" , default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : str = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase : Any = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase : int = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: UpperCAmelCase : Dict = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: UpperCAmelCase : Tuple = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: UpperCAmelCase : List[Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: UpperCAmelCase : str = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images] UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] UpperCAmelCase : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
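# --- Hedged usage sketch: upstream this processor corresponds to `CLIPImageProcessor`. With the
# defaults set above it resizes the short edge to 224, center-crops to 224x224, rescales by
# 1/255, and normalizes with the OpenAI CLIP mean/std. The checkpoint name is illustrative.
import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # any H x W x C uint8 image
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])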
109
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __SCREAMING_SNAKE_CASE :List[Any] = None __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __SCREAMING_SNAKE_CASE :List[Any] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } __SCREAMING_SNAKE_CASE :Optional[Any] = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } __SCREAMING_SNAKE_CASE :Optional[int] = '''▁''' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = AlbertTokenizer def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_UpperCAmelCase = ( AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token ) super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , ) _UpperCAmelCase = do_lower_case _UpperCAmelCase = remove_space _UpperCAmelCase = keep_accents _UpperCAmelCase = vocab_file _UpperCAmelCase = False if not self.vocab_file else True def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ): copyfile(self.vocab_file , snake_case_ ) return (out_vocab_file,)
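# --- Hedged usage sketch for the fast tokenizer above (upstream: `AlbertTokenizerFast`),
# showing the [CLS] A [SEP] B [SEP] layout and the 0/1 segment ids that
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences implement.
# Checkpoint name is illustrative; exact token counts depend on the sentencepiece model.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("how are you", "i am fine")
print(encoded["token_type_ids"])
# e.g. [0, 0, 0, 0, 0, 1, 1, 1, 1] -- [CLS] + segment A + [SEP] get 0, segment B + [SEP] get 1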
22
0
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: List[Any] ) -> int: """simple docstring""" lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase_ ) as mock_head: lowercase__ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase_ ) as mock_head: lowercase__ = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" try: lowercase__ = tempfile.mktemp() with open(UpperCamelCase_ , '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , UpperCamelCase_ ) lowercase__ = AlbertTokenizer.from_pretrained(UpperCamelCase_ ) finally: os.remove(UpperCamelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''' , '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , UpperCamelCase_ ) lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('''tokenizer.json''' ) def lowerCamelCase_ ( self: str ) -> Optional[int]: """simple docstring""" lowercase__ = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class _a ( unittest.TestCase ): _lowercase : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def lowerCamelCase_ ( cls: Optional[int] ) -> str: """simple docstring""" lowercase__ = TOKEN HfFolder.save_token(UpperCamelCase_ ) @classmethod def lowerCamelCase_ ( cls: Tuple ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def lowerCamelCase_ ( self: Dict ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(UpperCamelCase_ , '''vocab.txt''' ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizer(UpperCamelCase_ ) tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ , repo_id='''test-tokenizer''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(UpperCamelCase_ , '''vocab.txt''' ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizer(UpperCamelCase_ ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase_ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token ) lowercase__ = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def lowerCamelCase_ ( self: Any ) -> Tuple: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(UpperCamelCase_ , '''vocab.txt''' ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__ = CustomTokenizer(UpperCamelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''' , 
use_auth_token=self._token ) lowercase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = os.path.join(UpperCamelCase_ , '''vocab.txt''' ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__ = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowercase__ = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) lowercase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' ) lowercase__ = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' , use_fast=UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" lowercase__ = Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] ) trie.add('''[CLS]''' ) trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def lowerCamelCase_ ( self: Optional[int] ) -> List[str]: """simple docstring""" lowercase__ = Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] ) def lowerCamelCase_ ( self: str ) -> Optional[Any]: """simple docstring""" lowercase__ = Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def lowerCamelCase_ ( self: List[str] ) -> Dict: """simple docstring""" lowercase__ = Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" lowercase__ = Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = Trie() 
trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] ) def lowerCamelCase_ ( self: str ) -> Union[str, Any]: """simple docstring""" lowercase__ = Trie() lowercase__ = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase_ , ['''AB''', '''C'''] )
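# --- Hedged sketch of the Trie behavior the tests above pin down: added strings act as hard
# boundaries when splitting raw text, and the longest registered match wins.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']  (extra_id_100 wins over its prefix extra_id_1)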
110
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :int = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """perceiver""" def __init__( self : Any , snake_case_ : List[Any]=2_5_6 , snake_case_ : str=1_2_8_0 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : int=1 , snake_case_ : List[Any]=2_6 , snake_case_ : Dict=8 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Tuple=None , snake_case_ : Any="kv" , snake_case_ : Any=1 , snake_case_ : List[str]=1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Dict=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : List[str]=True , snake_case_ : str=2_6_2 , snake_case_ : Optional[Any]=2_0_4_8 , snake_case_ : Union[str, Any]=5_6 , snake_case_ : Dict=[3_6_8, 4_9_6] , snake_case_ : Tuple=1_6 , snake_case_ : Union[str, Any]=1_9_2_0 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) _UpperCAmelCase = num_latents _UpperCAmelCase = d_latents _UpperCAmelCase = d_model _UpperCAmelCase = num_blocks _UpperCAmelCase = num_self_attends_per_block _UpperCAmelCase = num_self_attention_heads _UpperCAmelCase = num_cross_attention_heads _UpperCAmelCase = qk_channels _UpperCAmelCase = v_channels _UpperCAmelCase = cross_attention_shape_for_attention _UpperCAmelCase = self_attention_widening_factor _UpperCAmelCase = cross_attention_widening_factor _UpperCAmelCase = hidden_act _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_query_residual # masked language modeling attributes _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings # image classification attributes _UpperCAmelCase = image_size # flow attributes _UpperCAmelCase = train_size # multimodal autoencoding attributes _UpperCAmelCase = num_frames _UpperCAmelCase = audio_samples_per_frame _UpperCAmelCase = samples_per_patch _UpperCAmelCase = output_shape class A_ ( lowerCAmelCase_ ): @property def lowercase ( self : int ): if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def lowercase ( self : Optional[Any] ): return 1e-4 def lowercase ( self : List[str] , snake_case_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , snake_case_ : int = 3 , snake_case_ : int = 4_0 , snake_case_ : int = 4_0 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(snake_case_ , snake_case_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples 
to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = preprocessor.num_special_tokens_to_add(snake_case_ ) _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [" ".join(["a"] ) * seq_length] * batch_size _UpperCAmelCase = dict(preprocessor(snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("input_ids" ) return inputs elif isinstance(snake_case_ , snake_case_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension(snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch ) _UpperCAmelCase = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _UpperCAmelCase = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
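# --- Hedged usage sketch: upstream this configuration is `PerceiverConfig`. Constructing it
# with a couple of overrides shows that the latent bottleneck (num_latents, d_latents) is sized
# independently of the input dimension d_model:
from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=256, d_latents=1280, d_model=768)
print(config.num_latents, config.d_latents, config.d_model)  # 256 1280 768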
22
0
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): assert isinstance(__lowercase , __lowercase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): lowercase_ : int = tmp_path / 'cache' lowercase_ : Optional[Any] = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase_ : str = TextDatasetReader(__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase ).read() _check_text_dataset(__lowercase , __lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : Optional[int] = tmp_path / 'cache' lowercase_ : Optional[Any] = {'text': 'string'} lowercase_ : str = features.copy() if features else default_expected_features lowercase_ : Any = ( Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase_ : str = TextDatasetReader(__lowercase , features=__lowercase , cache_dir=__lowercase ).read() _check_text_dataset(__lowercase , __lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : int = tmp_path / 'cache' lowercase_ : Tuple = {'text': 'string'} lowercase_ : List[Any] = TextDatasetReader(__lowercase , cache_dir=__lowercase , split=__lowercase ).read() _check_text_dataset(__lowercase , __lowercase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): if issubclass(__lowercase , __lowercase ): lowercase_ : Tuple = text_path elif issubclass(__lowercase , __lowercase ): lowercase_ : Any = [text_path] lowercase_ : Tuple = tmp_path / 'cache' lowercase_ : List[str] = {'text': 'string'} lowercase_ : List[Any] = TextDatasetReader(__lowercase , cache_dir=__lowercase ).read() _check_text_dataset(__lowercase , __lowercase ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]=("train",) ): assert isinstance(__lowercase , __lowercase ) for split in splits: lowercase_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Tuple = tmp_path / 'cache' lowercase_ 
: Dict = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase_ : Tuple = TextDatasetReader({'train': text_path} , cache_dir=__lowercase , keep_in_memory=__lowercase ).read() _check_text_datasetdict(__lowercase , __lowercase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : int = tmp_path / 'cache' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowercase_ : int = {'text': 'string'} lowercase_ : Union[str, Any] = features.copy() if features else default_expected_features lowercase_ : int = ( Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase_ : Optional[int] = TextDatasetReader({'train': text_path} , features=__lowercase , cache_dir=__lowercase ).read() _check_text_datasetdict(__lowercase , __lowercase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any ): if split: lowercase_ : List[str] = {split: text_path} else: lowercase_ : Optional[int] = 'train' lowercase_ : Optional[int] = {'train': text_path, 'test': text_path} lowercase_ : Union[str, Any] = tmp_path / 'cache' lowercase_ : Optional[int] = {'text': 'string'} lowercase_ : int = TextDatasetReader(__lowercase , cache_dir=__lowercase ).read() _check_text_datasetdict(__lowercase , __lowercase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
'''simple docstring'''
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
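# Hedged usage sketch (illustration, not part of the file above): what acc_and_f1
# computes, reproduced with plain numpy + scikit-learn on toy predictions.
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()              # 3 of 4 correct -> 0.75
f1 = f1_score(y_true=labels, y_pred=preds)  # precision 2/3, recall 1 -> F1 = 0.8
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})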
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
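# Hedged usage sketch (assumption: SEWDConfig is exported at the top level of
# transformers). inputs_to_logits_ratio multiplies the conv strides:
# 5*2*1*2*1*2*1*2*1*2*1*2*1 = 320, i.e. one encoder frame per 320 input samples
# (about 20 ms of audio at 16 kHz).
from transformers import SEWDConfig

config = SEWDConfig()
print(config.inputs_to_logits_ratio)  # 320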
'''simple docstring'''
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
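# Hedged usage sketch (not part of the original script): invoking the converter from
# Python instead of the CLI. All paths below are hypothetical placeholders; note that
# the vocab file is looked up next to the checkpoint (tf_checkpoint_path[:-10] + "vocab.txt").
# convert_tf_checkpoint_to_pytorch(
#     task="WTQ",
#     reset_position_index_per_cell=True,
#     tf_checkpoint_path="/tmp/tapas_wtq/model.ckpt",        # hypothetical path
#     tapas_config_file="/tmp/tapas_wtq/tapas_config.json",  # hypothetical path
#     pytorch_dump_path="/tmp/tapas_wtq_pytorch",            # hypothetical path
# )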
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __snake_case : Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A__ : '''simple docstring''' def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict=16 , _SCREAMING_SNAKE_CASE: Dict=13 , _SCREAMING_SNAKE_CASE: int=7 , _SCREAMING_SNAKE_CASE: Any=14 , _SCREAMING_SNAKE_CASE: int=10 , _SCREAMING_SNAKE_CASE: Any=19 , _SCREAMING_SNAKE_CASE: int=5 , _SCREAMING_SNAKE_CASE: Any=4 , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[int]=16 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Any=4 , _SCREAMING_SNAKE_CASE: List[Any]=4 , _SCREAMING_SNAKE_CASE: Optional[Any]="gelu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE: Tuple=[1, 2, 3, 4, 5] , _SCREAMING_SNAKE_CASE: str=25 , _SCREAMING_SNAKE_CASE: Any=5 , ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = d_model __lowerCAmelCase : Optional[Any] = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : List[Any] = prediction_length __lowerCAmelCase : Optional[Any] = context_length __lowerCAmelCase : Optional[Any] = cardinality __lowerCAmelCase : int = num_time_features __lowerCAmelCase : Optional[int] = lags_sequence __lowerCAmelCase : Optional[Any] = embedding_dimension __lowerCAmelCase : List[Any] = is_training __lowerCAmelCase : Dict = hidden_size __lowerCAmelCase : Optional[Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : Union[str, Any] = intermediate_size __lowerCAmelCase : Dict = hidden_act __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : str = attention_probs_dropout_prob __lowerCAmelCase : Union[str, Any] = context_length __lowerCAmelCase : List[Any] = prediction_length + label_length __lowerCAmelCase : Tuple = label_length __lowerCAmelCase : str = moving_average __lowerCAmelCase : List[str] = autocorrelation_factor def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]: """simple docstring""" return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : Dict = 
config.context_length + max(config.lags_sequence) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0]) __lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features]) __lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length]) __lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length]) > 0.5 # decoder inputs __lowerCAmelCase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) __lowerCAmelCase : int = floats_tensor([self.batch_size, config.prediction_length]) __lowerCAmelCase : Union[str, Any] = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = self.get_config() __lowerCAmelCase : Union[str, Any] = self.prepare_autoformer_inputs_dict(snake_case_) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : List[str] = AutoformerModel(config=snake_case_).to(snake_case_).eval() __lowerCAmelCase : Tuple = model(**snake_case_) __lowerCAmelCase : Optional[int] = outputs.encoder_last_hidden_state __lowerCAmelCase : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Tuple = model.get_encoder() encoder.save_pretrained(snake_case_) __lowerCAmelCase : int = AutoformerEncoder.from_pretrained(snake_case_).to(snake_case_) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = model.create_network_inputs(**snake_case_) __lowerCAmelCase , __lowerCAmelCase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...]) __lowerCAmelCase : Tuple = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) __lowerCAmelCase : Optional[int] = encoder(inputs_embeds=snake_case_)[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3) __lowerCAmelCase : str = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1) .unsqueeze(1) .repeat(1 , config.prediction_length , 1) ) __lowerCAmelCase : Any = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) __lowerCAmelCase : Optional[int] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) __lowerCAmelCase : str = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[Any] = model.get_decoder() decoder.save_pretrained(snake_case_) __lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(snake_case_).to(snake_case_) __lowerCAmelCase : Dict = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3) @require_torch class A__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () SCREAMING_SNAKE_CASE = (AutoformerForPrediction,) if is_torch_available() else () SCREAMING_SNAKE_CASE = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = AutoformerModelTester(self) __lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_) def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int: """simple docstring""" self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[int]: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __lowerCAmelCase : str = model_class(snake_case_) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_) __lowerCAmelCase , __lowerCAmelCase : str = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_) self.assertEqual(info["missing_keys"] , []) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_) @unittest.skip(reason="Model has no tokens embeddings") def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str: """simple docstring""" pass def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" __lowerCAmelCase : List[str] = inspect.signature(getattr(snake_case_ , "forward")) # The main input is the name of the argument after `self` __lowerCAmelCase : Tuple = list(model_signature.parameters.keys())[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Dict = 
model_class(snake_case_) __lowerCAmelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : Any = [*signature.parameters.keys()] __lowerCAmelCase : Optional[Any] = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask") expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ]) self.assertListEqual(arg_names[: len(snake_case_)] , snake_case_) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Dict = True __lowerCAmelCase : int = getattr(self.model_tester , "seq_length" , snake_case_) __lowerCAmelCase : str = getattr(self.model_tester , "decoder_seq_length" , snake_case_) __lowerCAmelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , snake_case_) __lowerCAmelCase : int = getattr(self.model_tester , "d_model" , snake_case_) __lowerCAmelCase : Optional[Any] = getattr(self.model_tester , "num_attention_heads" , snake_case_) __lowerCAmelCase : Dict = d_model // num_attention_heads for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Any = False __lowerCAmelCase : Tuple = True __lowerCAmelCase : Optional[int] = model_class(snake_case_) model.to(snake_case_) model.eval() with torch.no_grad(): __lowerCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case_ , snake_case_)) __lowerCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowerCAmelCase : Any = True __lowerCAmelCase : int = model_class(snake_case_) model.to(snake_case_) model.eval() with torch.no_grad(): __lowerCAmelCase : str = model(**self._prepare_for_class(snake_case_ , snake_case_)) __lowerCAmelCase : List[Any] = outputs.encoder_attentions self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) __lowerCAmelCase : Union[str, Any] = len(snake_case_) __lowerCAmelCase : Optional[Any] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_) # decoder attentions __lowerCAmelCase : Dict = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple)) self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions __lowerCAmelCase : List[str] = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple)) self.assertEqual(len(snake_case_) , 
self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : str = True __lowerCAmelCase : Union[str, Any] = model_class(snake_case_) model.to(snake_case_) model.eval() with torch.no_grad(): __lowerCAmelCase : str = model(**self._prepare_for_class(snake_case_ , snake_case_)) self.assertEqual(out_len + 2 , len(snake_case_)) __lowerCAmelCase : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _SCREAMING_SNAKE_CASE ( self: Dict) -> Union[str, Any]: """simple docstring""" super().test_retain_grad_hidden_states_attentions() def _lowercase ( __snake_case="train-batch.pt" ) -> List[str]: __lowerCAmelCase : Any = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__lowercase ,repo_type="dataset" ) __lowerCAmelCase : Dict = torch.load(__lowercase ,map_location=__lowercase ) return batch @require_torch @slow class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" __lowerCAmelCase : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(snake_case_) __lowerCAmelCase : Union[str, Any] = prepare_batch() with torch.no_grad(): __lowerCAmelCase : Any = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] __lowerCAmelCase : Tuple = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)) self.assertEqual(output.shape , snake_case_) __lowerCAmelCase : List[Any] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case_) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_)) def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]: """simple docstring""" __lowerCAmelCase : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(snake_case_) __lowerCAmelCase : List[str] = prepare_batch("val-batch.pt") with torch.no_grad(): __lowerCAmelCase : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state __lowerCAmelCase : int = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape , snake_case_) __lowerCAmelCase : Tuple = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case_) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_)) def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(snake_case_) __lowerCAmelCase : Tuple = 
prepare_batch("val-batch.pt") with torch.no_grad(): __lowerCAmelCase : Any = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) __lowerCAmelCase : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape , snake_case_) __lowerCAmelCase : Dict = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case_) __lowerCAmelCase : Union[str, Any] = outputs.sequences.mean(dim=1) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1))
'''simple docstring'''
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
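# Hedged usage sketch (assumption: MobileNetV2Config is exported at the top level of
# transformers). Overriding a couple of fields leaves the remaining defaults intact.
from transformers import MobileNetV2Config

config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
print(config.hidden_act, config.output_stride)  # relu6 32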
'''simple docstring'''
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
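# Hedged usage sketch mirroring the tests above: round-tripping a DatasetInfo through
# its YAML representation keeps only the fields whitelisted in
# DatasetInfo._INCLUDED_INFO_IN_YAML (dataset_size is one of them; description is not).
# _to_yaml_dict is a private API, used here only because the tests above rely on it.
from datasets import DatasetInfo

info = DatasetInfo(description="foo", dataset_size=42)
yaml_dict = info._to_yaml_dict()
print(yaml_dict.get("dataset_size"))  # 42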
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowercase () -> Any: '''simple docstring''' lowerCAmelCase = { """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } lowerCAmelCase = Dataset.from_dict(__lowercase ) return dataset class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ): def __lowercase ( self : Union[str, Any] ): lowerCAmelCase = get_dataset() lowerCAmelCase = make_duplicate_clusters(snake_case_ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __lowercase ( self : int ): lowerCAmelCase = get_dataset() lowerCAmelCase , lowerCAmelCase = deduplicate_dataset(snake_case_ ) self.assertEqual(len(snake_case_ ) , 2 ) print(snake_case_ ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , snake_case_ )
'''simple docstring'''


def reverse_long_words(sentence: str) -> str:
    # Reverse every word longer than 4 characters; shorter words are kept as-is.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
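# Usage examples for reverse_long_words defined above:
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
assert reverse_long_words("nohtyP syawla") == "Python always"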
"""simple docstring""" import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class a ( lowerCAmelCase_, lowerCAmelCase_ ): @register_to_config def __init__( self , *, _lowerCamelCase = 4 , _lowerCamelCase = 7_6_8 , _lowerCamelCase , _lowerCamelCase , ): super().__init__() lowercase = nn.Parameter(torch.zeros(snake_case_ ) ) # parameters for additional clip time embeddings lowercase = nn.Linear(snake_case_ , snake_case_ ) lowercase = nn.Linear(snake_case_ , snake_case_ ) # parameters for encoder hidden states lowercase = clip_extra_context_tokens lowercase = nn.Linear( snake_case_ , self.clip_extra_context_tokens * cross_attention_dim ) lowercase = nn.Linear(snake_case_ , snake_case_ ) lowercase = nn.LayerNorm(snake_case_ ) def UpperCamelCase_ ( self , *, _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowercase = image_embeddings.shape[0] lowercase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowercase = classifier_free_guidance_embeddings.expand( snake_case_ , -1 ) lowercase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowercase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowercase = self.embedding_proj(snake_case_ ) lowercase = self.clip_image_embeddings_project_to_time_embeddings(snake_case_ ) lowercase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowercase = self.clip_extra_context_tokens_proj(snake_case_ ) lowercase = clip_extra_context_tokens.reshape(snake_case_ , -1 , self.clip_extra_context_tokens ) lowercase = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowercase = self.encoder_hidden_states_proj(snake_case_ ) lowercase = self.text_encoder_hidden_states_norm(snake_case_ ) lowercase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
'''simple docstring'''


def harmonic_series(n_term: str) -> list:
    # Return the first n terms of the harmonic series as strings, starting with "1".
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
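# Usage examples for harmonic_series defined above (the argument is a string,
# matching the input() call in the __main__ block):
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []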
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE :int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[str] = True def lowercase ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Tuple , snake_case_ : Any ): return ("This is a test", "This is a test") def lowercase ( self : Optional[int] ): _UpperCAmelCase = "</s>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(snake_case_ ) , 1_1_0_3 ) def lowercase ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def lowercase ( self : List[Any] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Tuple ): _UpperCAmelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions." _UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 _UpperCAmelCase = "To ensure a smooth flow of bank resolutions." 
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self : int ): _UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. @slow def lowercase ( self : Dict ): # fmt: off _UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class A_ ( 
lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : List[Any] = PegasusTokenizerFast _lowerCamelCase : int = True _lowerCamelCase : Union[str, Any] = True def lowercase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ , offset=0 , mask_token_sent=snake_case_ , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def lowercase ( self : Optional[Any] , **snake_case_ : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Union[str, Any] , snake_case_ : str ): return ("This is a test", "This is a test") def lowercase ( self : List[str] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) @require_torch def lowercase ( self : Tuple ): _UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) _UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids self.assertListEqual( snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
def solution(limit: int = 1000000) -> int:
    # Sum of Euler's totient phi(d) for d = 2..limit, computed with a prime sieve.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime (phi(p) = p - 1), so update its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
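# Cross-check of the sieve above against a brute-force totient for a small limit.
# The sum of phi(d) for d = 2..limit counts the reduced proper fractions with
# denominator up to limit (Project Euler problem 72).
from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert solution(8) == sum(phi_naive(d) for d in range(2, 9))  # both give 21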
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of random PIL images for the processor tests below.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
22
0
def pancake_sort(arr: list) -> list:
    """Sort a list using the pancake sort algorithm."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
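# A minimal usage sketch for pancake_sort above; the sample list is an
# illustrative assumption, not part of the original file.
example = [3, 1, 4, 1, 5, 9, 2, 6]
assert pancake_sort(list(example)) == sorted(example)
print(pancake_sort(list(example)))  # [1, 1, 2, 3, 4, 5, 6, 9]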
273
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase_ ( __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = image.size _UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = torch.from_numpy(__lowercase ) return 2.0 * image - 1.0 class A_ ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] else: raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' ) if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = preprocess(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _UpperCAmelCase = next(self.unet.parameters() ).dtype _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ ) _UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case_ , device=self.device ) _UpperCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for t in self.progress_bar(snake_case_ ): # concat latents and low resolution image in the channel dimension. 
_UpperCAmelCase = torch.cat([latents, image] , dim=1 ) _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample # decode the image latents with the VQVAE _UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample _UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
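# Hedged usage sketch for the super-resolution pipeline above. The class name
# "A_" comes from this dump's obfuscation; the checkpoint id and input path
# below are assumptions about the kind of VQVAE+UNet checkpoint such a
# pipeline pairs with, not something stated in the file.
import PIL.Image

pipe = A_.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")  # assumed checkpoint
low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))  # placeholder input
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")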
22
0
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __a :int = '''\ ''' __a :List[Any] = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' __a :Tuple = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int = 16 , UpperCAmelCase : bool = True , UpperCAmelCase : int=None ): if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": A_ = "cuda" else: A_ = "cuda" if torch.cuda.is_available() else "cpu" A_ = AutoModelForCausalLM.from_pretrained(snake_case_ ) A_ = model.to(snake_case_ ) A_ = AutoTokenizer.from_pretrained(snake_case_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: A_ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(snake_case_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" A_ = model.config.max_length - 1 else: A_ = model.config.max_length A_ = tokenizer( snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors="pt" , return_attention_mask=snake_case_ , ).to(snake_case_ ) A_ = encodings["input_ids"] A_ = encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." A_ = [] A_ = CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(snake_case_ ) , snake_case_ ) ): A_ = min(start_index + batch_size , len(snake_case_ ) ) A_ = encoded_texts[start_index:end_index] A_ = attn_masks[start_index:end_index] if add_start_token: A_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(snake_case_ ) A_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) A_ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(snake_case_ ), attn_mask] , dim=1 ) A_ = encoded_batch with torch.no_grad(): A_ = model(snake_case_ , attention_mask=snake_case_ ).logits A_ = out_logits[..., :-1, :].contiguous() A_ = labels[..., 1:].contiguous() A_ = attn_mask[..., 1:].contiguous() A_ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , snake_case_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(snake_case_ )}
312
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df); with smoothing, 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
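# A worked example for the tf-idf helpers above on a tiny made-up corpus (two
# newline-separated "documents"); the numbers follow from the definitions.
corpus = "the cat sat on the mat\nthe dog sat"
tf = term_frequency("dog", "the dog sat")  # 1
df, n = document_frequency("dog", corpus)  # (1, 2)
idf = inverse_document_frequency(df, n)    # round(log10(2 / 1), 3) == 0.301
print(tf_idf(tf, idf))                     # 0.301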
22
0
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _lowercase ( lowercase__ ): __lowerCAmelCase : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) __lowerCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , __lowercase ) if matches: __lowerCAmelCase : Union[str, Any] = float(matches[1] ) __lowerCAmelCase : Dict = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __lowerCAmelCase : int = 1_0_0_1 __lowerCAmelCase : Dict = '''imagenet-1k-id2label.json''' __lowerCAmelCase : List[str] = '''huggingface/label-files''' __lowerCAmelCase : Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) __lowerCAmelCase : Optional[int] = {int(__lowercase ) + 1: v for k, v in idalabel.items()} __lowerCAmelCase : Optional[int] = '''background''' __lowerCAmelCase : List[Any] = idalabel __lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()} return config def _lowercase ( ): __lowerCAmelCase : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase : Tuple = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ): __lowerCAmelCase : Union[str, Any] = get_mobilenet_va_config(__lowercase ) # Load 🤗 model __lowerCAmelCase : int = MobileNetVaForImageClassification(__lowercase ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(__lowercase , __lowercase , __lowercase ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __lowerCAmelCase : Tuple = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 3_2} , ) __lowerCAmelCase : Any = image_processor(images=prepare_img() , return_tensors='''pt''' ) __lowerCAmelCase : Optional[Any] = model(**__lowercase ) __lowerCAmelCase : Union[str, Any] = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": __lowerCAmelCase : List[Any] = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": __lowerCAmelCase : Optional[int] = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: __lowerCAmelCase : Union[str, Any] = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , __lowercase , atol=1E-4 ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__lowercase ) if push_to_hub: print('''Pushing to the hub...''' ) __lowerCAmelCase : int = '''google/''' + model_name image_processor.push_to_hub(__lowercase ) model.push_to_hub(__lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( 
"--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _UpperCamelCase = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
275
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
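# Hedged usage sketch showing why the scheduler registry above matters: any
# listed scheduler can be swapped onto a pipeline via its config. The
# checkpoint id is an assumption for illustration only.
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # assumed checkpoint
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)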
22
0
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): lowercase = BarthezTokenizer lowercase = BarthezTokenizerFast lowercase = True lowercase = True def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().setUp() lowercase_ : Tuple = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=snake_case_ ) lowercase_ : str = tokenizer def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = '<pad>' lowercase_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) ,snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) ,snake_case_ ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-1] ,'<mask>' ) self.assertEqual(len(snake_case_ ) ,10_1122 ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,10_1122 ) @require_torch def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowercase_ : List[str] = [0, 57, 3018, 7_0307, 91, 2] lowercase_ : List[str] = self.tokenizer( snake_case_ ,max_length=len(snake_case_ ) ,padding=snake_case_ ,truncation=snake_case_ ,return_tensors='pt' ) self.assertIsInstance(snake_case_ ,snake_case_ ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) ,batch.attention_mask.shape ) lowercase_ : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(snake_case_ ,snake_case_ ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ : str = self.get_tokenizer() lowercase_ : Any = self.get_rust_tokenizer() lowercase_ : Optional[Any] = 'I was born in 92000, and this is falsé.' 
lowercase_ : Optional[int] = tokenizer.tokenize(snake_case_ ) lowercase_ : List[Any] = rust_tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ ,snake_case_ ) lowercase_ : Any = tokenizer.encode(snake_case_ ,add_special_tokens=snake_case_ ) lowercase_ : Tuple = rust_tokenizer.encode(snake_case_ ,add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ ,snake_case_ ) lowercase_ : Any = self.get_rust_tokenizer() lowercase_ : str = tokenizer.encode(snake_case_ ) lowercase_ : str = rust_tokenizer.encode(snake_case_ ) self.assertListEqual(snake_case_ ,snake_case_ ) @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[int] = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowercase_ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case_ ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=snake_case_ ,)
213
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, so the loop runs once per set
        # bit rather than 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
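# A quick cross-check for get_set_bits_count above: Kernighan's trick must
# agree with counting '1' characters in the binary representation.
for value in (0, 1, 25, 37, 2**20 - 1):
    assert get_set_bits_count(value) == bin(value).count("1")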
22
0
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase_ = logging.get_logger(__name__) class __A ( lowerCAmelCase_ ): '''simple docstring''' __lowerCamelCase : Optional[Any] = """AutoTokenizer""" __lowerCamelCase : int = ["""tokenizer"""] __lowerCamelCase : Dict = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__(self , A , A=None ) -> Any: """simple docstring""" super().__init__(snake_case_ ) _a = speaker_embeddings @classmethod def a__ (cls , A , A="speaker_embeddings_path.json" , **A ) -> Optional[Any]: """simple docstring""" if speaker_embeddings_dict_path is not None: _a = get_file_from_repo( snake_case_ , snake_case_ , subfolder=kwargs.pop('''subfolder''' , snake_case_ ) , cache_dir=kwargs.pop('''cache_dir''' , snake_case_ ) , force_download=kwargs.pop('''force_download''' , snake_case_ ) , proxies=kwargs.pop('''proxies''' , snake_case_ ) , resume_download=kwargs.pop('''resume_download''' , snake_case_ ) , local_files_only=kwargs.pop('''local_files_only''' , snake_case_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , snake_case_ ) , revision=kwargs.pop('''revision''' , snake_case_ ) , ) if speaker_embeddings_path is None: logger.warning( f'''`{os.path.join(snake_case_ , snake_case_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _a = None else: with open(snake_case_ ) as speaker_embeddings_json: _a = json.load(snake_case_ ) else: _a = None _a = AutoTokenizer.from_pretrained(snake_case_ , **snake_case_ ) return cls(tokenizer=snake_case_ , speaker_embeddings=snake_case_ ) def a__ (self , A , A="speaker_embeddings_path.json" , A="speaker_embeddings" , A = False , **A , ) -> Tuple: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(snake_case_ , snake_case_ , '''v2''' ) , exist_ok=snake_case_ ) _a = {} _a = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _a = self._load_voice_preset(snake_case_ ) _a = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , snake_case_ , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=snake_case_ , ) _a = os.path.join(snake_case_ , f'''{prompt_key}_{key}.npy''' ) _a = tmp_dict with open(os.path.join(snake_case_ , snake_case_ ) , '''w''' ) as fp: json.dump(snake_case_ , snake_case_ ) super().save_pretrained(snake_case_ , snake_case_ , **snake_case_ ) def a__ (self , A = None , **A ) -> List[Any]: """simple docstring""" _a = self.speaker_embeddings[voice_preset] _a = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _a = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , snake_case_ ) , cache_dir=kwargs.pop('''cache_dir''' , snake_case_ ) , force_download=kwargs.pop('''force_download''' , snake_case_ ) , proxies=kwargs.pop('''proxies''' , snake_case_ ) , resume_download=kwargs.pop('''resume_download''' , 
snake_case_ ) , local_files_only=kwargs.pop('''local_files_only''' , snake_case_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , snake_case_ ) , revision=kwargs.pop('''revision''' , snake_case_ ) , ) if path is None: raise ValueError( f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' ) _a = np.load(snake_case_ ) return voice_preset_dict def a__ (self , A = None ) -> List[str]: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__(self , A=None , A=None , A="pt" , A=256 , A=False , A=True , A=False , **A , ) -> int: """simple docstring""" if voice_preset is not None and not isinstance(snake_case_ , snake_case_ ): if ( isinstance(snake_case_ , snake_case_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _a = self._load_voice_preset(snake_case_ ) else: if isinstance(snake_case_ , snake_case_ ) and not voice_preset.endswith('''.npz''' ): _a = voice_preset + '''.npz''' _a = np.load(snake_case_ ) if voice_preset is not None: self._validate_voice_preset_dict(snake_case_ , **snake_case_ ) _a = BatchFeature(data=snake_case_ , tensor_type=snake_case_ ) _a = self.tokenizer( snake_case_ , return_tensors=snake_case_ , padding='''max_length''' , max_length=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , ) if voice_preset is not None: _a = voice_preset return encoded_text
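# Hedged usage sketch for the Bark-style processor above; the checkpoint id
# "suno/bark-small" and the voice preset name are assumptions based on the
# usual Bark setup in transformers, not taken from this file.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")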
211
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''') class A_ ( Generic[T] ): def __init__( self : List[Any] , snake_case_ : list[T] , snake_case_ : Callable[[T, T], T] ): _UpperCAmelCase = None _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = [any_type for _ in range(self.N )] + arr _UpperCAmelCase = fnc self.build() def lowercase ( self : List[Any] ): for p in range(self.N - 1 , 0 , -1 ): _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : T ): p += self.N _UpperCAmelCase = v while p > 1: _UpperCAmelCase = p // 2 _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Any , snake_case_ : int , snake_case_ : int ): # noqa: E741 _UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N _UpperCAmelCase = None while l <= r: if l % 2 == 1: _UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] ) if r % 2 == 0: _UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] ) _UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __SCREAMING_SNAKE_CASE :List[str] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b) def UpperCAmelCase_ ( ) -> None: '''simple docstring''' for i in range(len(__lowercase ) ): for j in range(__lowercase , len(__lowercase ) ): _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__lowercase , __lowercase ) assert max_range == max_segment_tree.query(__lowercase , __lowercase ) assert sum_range == sum_segment_tree.query(__lowercase , __lowercase ) test_all_segments() for index, value in test_updates.items(): __SCREAMING_SNAKE_CASE :str = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
22
0
"""simple docstring""" __snake_case : int = { '''meter''': '''m''', '''kilometer''': '''km''', '''megametre''': '''Mm''', '''gigametre''': '''Gm''', '''terametre''': '''Tm''', '''petametre''': '''Pm''', '''exametre''': '''Em''', '''zettametre''': '''Zm''', '''yottametre''': '''Ym''', } # Exponent of the factor(meter) __snake_case : List[Any] = { '''m''': 0, '''km''': 3, '''Mm''': 6, '''Gm''': 9, '''Tm''': 12, '''Pm''': 15, '''Em''': 18, '''Zm''': 21, '''Ym''': 24, } def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> float: __lowerCAmelCase : List[Any] = from_type.lower().strip("s" ) __lowerCAmelCase : int = to_type.lower().strip("s" ) __lowerCAmelCase : str = UNIT_SYMBOL.get(__lowercase ,__lowercase ) __lowerCAmelCase : List[Any] = UNIT_SYMBOL.get(__lowercase ,__lowercase ) if from_sanitized not in METRIC_CONVERSION: __lowerCAmelCase : List[str] = ( F"""Invalid \'from_type\' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(__lowercase )}""" ) raise ValueError(__lowercase ) if to_sanitized not in METRIC_CONVERSION: __lowerCAmelCase : int = ( F"""Invalid \'to_type\' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(__lowercase )}""" ) raise ValueError(__lowercase ) __lowerCAmelCase : List[Any] = METRIC_CONVERSION[from_sanitized] __lowerCAmelCase : Dict = METRIC_CONVERSION[to_sanitized] __lowerCAmelCase : Dict = 1 if from_exponent > to_exponent: __lowerCAmelCase : int = from_exponent - to_exponent else: __lowerCAmelCase : Optional[int] = -(to_exponent - from_exponent) return value * pow(10 ,__lowercase ) if __name__ == "__main__": from doctest import testmod testmod()
269
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
22
0
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() UpperCAmelCase = 2 class A_ : '''simple docstring''' def __init__( self , *, # begin keyword-only arguments snake_case="<s>" , snake_case="<pad>" , snake_case="</s>" , snake_case="<unk>" , snake_case=None , ): lowercase , lowercase , lowercase , lowercase = bos, unk, pad, eos lowercase = [] lowercase = [] lowercase = {} lowercase = self.add_symbol(snake_case_ ) lowercase = self.add_symbol(snake_case_ ) lowercase = self.add_symbol(snake_case_ ) lowercase = self.add_symbol(snake_case_ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(snake_case_ ) lowercase = len(self.symbols ) def __eq__( self , snake_case ): return self.indices == other.indices def __getitem__( self , snake_case ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): return len(self.symbols ) def __contains__( self , snake_case ): return sym in self.indices @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case ): lowercase = cls() d.add_from_file(snake_case_ ) return d def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=1 , snake_case=False ): if word in self.indices and not overwrite: lowercase = self.indices[word] lowercase = self.count[idx] + n return idx else: lowercase = len(self.symbols ) lowercase = idx self.symbols.append(snake_case_ ) self.count.append(snake_case_ ) return idx def SCREAMING_SNAKE_CASE__ ( self , snake_case ): return 0 def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if isinstance(snake_case_ , snake_case_ ): try: with open(snake_case_ , 'r' , encoding='utf-8' ) as fd: self.add_from_file(snake_case_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(snake_case_ ) ) return lowercase = f.readlines() lowercase = self._load_meta(snake_case_ ) for line in lines[indices_start_line:]: try: lowercase , lowercase = line.rstrip().rsplit(' ' , 1 ) if field == "#fairseq:overwrite": lowercase = True lowercase , lowercase = line.rsplit(' ' , 1 ) else: lowercase = False lowercase = int(snake_case_ ) lowercase = line if word in self and not overwrite: raise RuntimeError( 'Duplicate word found when loading Dictionary: \'{}\'. ' 'Duplicate words can overwrite earlier ones by adding the ' '#fairseq:overwrite flag at the end of the corresponding row ' 'in the dictionary file. 
If using the Camembert model, please ' 'download an updated copy of the model file.'.format(snake_case_ ) ) self.add_symbol(snake_case_ , n=snake_case_ , overwrite=snake_case_ ) except ValueError: raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' ) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = dict((re.sub(r'@@$' , '' , __lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , __lowercase ), v) for k, v in d.items() ) lowercase = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] lowercase = d[k] # restore return da def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if not os.path.exists(__lowercase ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(__lowercase , exist_ok=__lowercase ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models lowercase = os.path.join(__lowercase , 'checkpoint.pt' ) if not os.path.isfile(__lowercase ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) lowercase = torch.load(__lowercase , map_location='cpu' ) lowercase = chkpt['cfg']['model'] # dicts lowercase = os.path.join(__lowercase , 'dict.txt' ) if not os.path.isfile(__lowercase ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) lowercase = Dictionary.load(__lowercase ) lowercase = rewrite_dict_keys(src_dict.indices ) lowercase = len(__lowercase ) lowercase = os.path.join(__lowercase , VOCAB_FILES_NAMES['vocab_file'] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(__lowercase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) ) # merges_file (bpecodes) lowercase = os.path.join(__lowercase , 'bpecodes' ) if not os.path.isfile(__lowercase ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) lowercase = os.path.join(__lowercase , VOCAB_FILES_NAMES['merges_file'] ) shutil.copyfile(__lowercase , __lowercase ) # model config lowercase = os.path.join(__lowercase , 'config.json' ) lowercase = { 'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.02, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1e-12, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(__lowercase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) ) # tokenizer config lowercase = os.path.join(__lowercase , __lowercase ) lowercase = { 'bos_token': '<s>', 'eos_token': '</s>', 'model_max_length': 1024, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with 
open(__lowercase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) ) # model lowercase = chkpt['model'] # remove unneeded keys lowercase = [ 'decoder.version', ] for k in ignore_keys: model_state_dict.pop(__lowercase , __lowercase ) lowercase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('output_projection.weight' ): lowercase = model_state_dict.pop(__lowercase ) else: lowercase = model_state_dict.pop(__lowercase ) lowercase = BioGptConfig.from_pretrained(__lowercase ) lowercase = BioGptForCausalLM(__lowercase ) # check that it loads ok model_new.load_state_dict(__lowercase ) # save lowercase = os.path.join(__lowercase , __lowercase ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(__lowercase , __lowercase ) print('Conversion is done!' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
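# Hedged usage sketch for the BioGPT conversion script above (paths are
# placeholders; the flags match the argparse definitions in the file):
# python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#     --biogpt_checkpoint_path ./biogpt_checkpoint_dir \
#     --pytorch_dump_folder_path ./biogpt_hf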
195
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via math.sqrt (float-based)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
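# A small comparison of the two predicates above: the binary-search version
# uses exact integer arithmetic, so it stays correct for arbitrarily large
# ints, whereas the float-based version can be affected by sqrt rounding.
big = (10**8 + 1) ** 2
print(perfect_square_binary_search(big))      # True
print(perfect_square_binary_search(big + 1))  # False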
22
0
"""simple docstring""" from __future__ import annotations import requests a = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def lowercase (snake_case__ : str , snake_case__ : int = 1 , snake_case__ : str = "new" , snake_case__ : list | None = None ) -> dict: '''simple docstring''' lowerCAmelCase = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ): lowerCAmelCase = f'''Invalid search term: {invalid_search_terms}''' raise ValueError(__lowercase ) lowerCAmelCase = requests.get( f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"""User-agent""": """A random string"""} , ) if response.status_code == 429: raise requests.HTTPError lowerCAmelCase = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )} lowerCAmelCase = {} for id_ in range(__lowercase ): lowerCAmelCase = { item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
155
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __SCREAMING_SNAKE_CASE :Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ): _UpperCAmelCase = d_model _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length _UpperCAmelCase = cardinality _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence _UpperCAmelCase = embedding_dimension _UpperCAmelCase = is_training _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = context_length _UpperCAmelCase = prediction_length + label_length _UpperCAmelCase = label_length _UpperCAmelCase = moving_average _UpperCAmelCase = autocorrelation_factor def lowercase ( self : Union[str, Any] ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase ( self : int , snake_case_ : Optional[Any] ): _UpperCAmelCase = config.context_length + max(config.lags_sequence ) _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) _UpperCAmelCase = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def lowercase ( self : List[Any] ): _UpperCAmelCase = self.get_config() _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ ) return config, inputs_dict def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ): _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval() _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = outputs.encoder_last_hidden_state _UpperCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_encoder() encoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _UpperCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _UpperCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _UpperCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _UpperCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _UpperCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_decoder() decoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else () _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Tuple = False _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[Any] = False def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, 
Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ ) @unittest.skip(reason="Model has no tokens embeddings" ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : Optional[int] ): _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) ) # The main input is the name of the argument after `self` _UpperCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ ) _UpperCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_ ) # decoder attentions _UpperCAmelCase = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _UpperCAmelCase = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 2 , len(snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase ( self : Dict ): super().test_retain_grad_hidden_states_attentions() def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" ) _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) return batch @require_torch @slow class A_ ( unittest.TestCase ): def lowercase ( self : Optional[int] ): _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch() with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] _UpperCAmelCase = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , 
static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ ) _UpperCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
22
0
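For reference, a minimal de-mangled sketch of the prediction flow that the Autoformer integration tests in the record above exercise; it assumes network access to the two public Hub repos those tests name, and mirrors their calls directly.

import torch
from huggingface_hub import hf_hub_download
from transformers import AutoformerForPrediction

# Fixture batch used by the slow tests above.
file = hf_hub_download(
    repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
)
batch = torch.load(file, map_location="cpu")

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
model.eval()
with torch.no_grad():
    outputs = model.generate(
        past_values=batch["past_values"],
        past_time_features=batch["past_time_features"],
        past_observed_mask=batch["past_observed_mask"],
        static_categorical_features=batch["static_categorical_features"],
        future_time_features=batch["future_time_features"],
    )

# sequences has shape (batch, num_parallel_samples, prediction_length); averaging
# over the sampled trajectories gives one point forecast per series, as the test does.
mean_prediction = outputs.sequences.mean(dim=1)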
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class a ( lowerCAmelCase_, unittest.TestCase ): UpperCAmelCase_ : Any =ReformerTokenizer UpperCAmelCase_ : str =ReformerTokenizerFast UpperCAmelCase_ : Union[str, Any] =True UpperCAmelCase_ : Union[str, Any] =False UpperCAmelCase_ : Any =True def UpperCamelCase_ ( self ): super().setUp() lowercase = ReformerTokenizer(snake_case_ , keep_accents=snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): lowercase = '<s>' lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def UpperCamelCase_ ( self ): lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(snake_case_ ) , 1_0_0_0 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase_ ( self ): if not self.test_rust_tokenizer: return lowercase = self.get_tokenizer() lowercase = self.get_rust_tokenizer() lowercase = 'I was born in 92000, and this is falsé.' lowercase = tokenizer.tokenize(snake_case_ ) lowercase = rust_tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) lowercase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) lowercase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) lowercase = self.get_rust_tokenizer() lowercase = tokenizer.encode(snake_case_ ) lowercase = rust_tokenizer.encode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def UpperCamelCase_ ( self , _lowerCamelCase=1_5 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowercase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) # Simple input lowercase = 'This is a simple input' lowercase = ['This is a simple input 1', 'This is a simple input 2'] lowercase = ('This is a simple input', 'This is a pair') lowercase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='max_length' ) # Simple input self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='max_length' ) # Simple input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='max_length' , ) # Pair input self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='max_length' ) # Pair input self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='max_length' ) # Pair input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , 
max_length=snake_case_ , padding='max_length' , ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowercase = ReformerTokenizer(snake_case_ , keep_accents=snake_case_ ) lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(snake_case_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( snake_case_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual( snake_case_ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowercase = tokenizer.convert_ids_to_tokens(snake_case_ ) self.assertListEqual( snake_case_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def UpperCamelCase_ ( self ): return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def UpperCamelCase_ ( self ): lowercase = 'Hello World!' lowercase = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) ) @slow def UpperCamelCase_ ( self ): lowercase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowercase = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) ) @require_torch @slow def UpperCamelCase_ ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence lowercase = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowercase = ' '.join(snake_case_ ) lowercase = self.big_tokenizer.encode_plus(snake_case_ , return_tensors='pt' ) lowercase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' ) lowercase = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) lowercase = encoded_sequence['input_ids'].shape lowercase = ReformerModel(snake_case_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**snake_case_ ) model(**snake_case_ ) @slow def UpperCamelCase_ ( self ): # fmt: off lowercase = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 lowercase = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=snake_case_ , sequences=snake_case_ , )
220
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class A_ : _lowerCamelCase : str _lowerCamelCase : str = None @staticmethod def lowercase ( ): raise NotImplementedError def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ): raise NotImplementedError def lowercase ( self : Any , snake_case_ : int ): raise NotImplementedError def lowercase ( self : List[str] ): if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase ( cls : List[Any] ): return f'`pip install {cls.pip_package or cls.name}`' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """optuna""" @staticmethod def lowercase ( ): return is_optuna_available() def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ): return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : int , snake_case_ : Optional[int] ): return default_hp_space_optuna(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """ray""" _lowerCamelCase : Tuple = """'ray[tune]'""" @staticmethod def lowercase ( ): return is_ray_available() def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ): return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : str ): return default_hp_space_ray(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """sigopt""" @staticmethod def lowercase ( ): return is_sigopt_available() def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ): return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] ): return default_hp_space_sigopt(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """wandb""" @staticmethod def lowercase ( ): return is_wandb_available() def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ): return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : Union[str, Any] ): return default_hp_space_wandb(snake_case_ ) __SCREAMING_SNAKE_CASE :Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase_ ( ) -> str: '''simple docstring''' _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowercase ) > 0: _UpperCAmelCase = available_backends[0].name if len(__lowercase ) > 1: logger.info( f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
22
0
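For reference, the everyday use of the tokenizer the record above tests, as a minimal sketch assuming the public checkpoint its slow tests load:

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer.encode("Hello World!")
print(ids)                    # the slow test above expects [126, 32, 262, 152, 38, 72, 287]
print(tokenizer.decode(ids))  # round-trips to the input, modulo characters mapped to <unk>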
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Union[str, Any] , __lowercase : List[Any] ): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): __lowercase =model_result['result'][batch_size][sequence_length] self.assertIsNotNone(snake_case_ ) def snake_case ( self : Optional[int] ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : Optional[int] ): """simple docstring""" __lowercase ='sgugger/tiny-distilbert-classification' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , only_pretrain_model=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : str ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , torchscript=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def snake_case ( self : str ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , fpaa=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : Tuple ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =AutoConfig.from_pretrained(snake_case_ ) # set architectures equal to `None` __lowercase =None __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ , configs=[config] ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : List[str] ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , 
inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def snake_case ( self : int ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=snake_case_ , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case ( self : int ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =AutoConfig.from_pretrained(snake_case_ ) __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ , configs=[config] ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : Optional[int] ): """simple docstring""" __lowercase ='sshleifer/tinier_bart' __lowercase =AutoConfig.from_pretrained(snake_case_ ) __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ , configs=[config] ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case ( self : Any ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' __lowercase =AutoConfig.from_pretrained(snake_case_ ) __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ , configs=[config] ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case ( self : List[str] ): """simple docstring""" __lowercase ='sshleifer/tinier_bart' __lowercase =AutoConfig.from_pretrained(snake_case_ ) __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ , configs=[config] ) __lowercase =benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case ( self : Union[str, Any] ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , save_to_csv=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(snake_case_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(snake_case_ , 
'inf_mem.csv' ) , train_time_csv_file=os.path.join(snake_case_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(snake_case_ , 'env.csv' ) , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) benchmark.run() self.assertTrue(Path(os.path.join(snake_case_ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case_ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case_ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case_ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case_ , 'env.csv' ) ).exists() ) def snake_case ( self : List[Any] ): """simple docstring""" __lowercase ='sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__lowercase : Any ): self.assertTrue(hasattr(snake_case_ , 'sequential' ) ) self.assertTrue(hasattr(snake_case_ , 'cumulative' ) ) self.assertTrue(hasattr(snake_case_ , 'current' ) ) self.assertTrue(hasattr(snake_case_ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __lowercase =PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case_ , inference=snake_case_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case_ , 'log.txt' ) , log_print=snake_case_ , trace_memory_line_by_line=snake_case_ , multi_process=snake_case_ , ) __lowercase =PyTorchBenchmark(snake_case_ ) __lowercase =benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(snake_case_ , 'log.txt' ) ).exists() )
141
'''simple docstring''' __SCREAMING_SNAKE_CASE :List[str] = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, 
KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
22
0
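For reference, the single pattern the benchmark tests in the record above repeat under different flags, de-mangled into a runnable sketch (the tiny checkpoint is the one those tests use):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
# Per-model dicts carrying 'bs', 'ss' and a 'result' table indexed by batch size
# and sequence length, which is what the check helper above walks.
print(results.time_inference_result)
print(results.memory_inference_result)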
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : str = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class __lowercase (lowerCAmelCase_ ): """simple docstring""" _snake_case = """encodec""" def __init__( self , A=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , A=2_4_0_0_0 , A=1 , A=False , A=None , A=None , A=1_2_8 , A=3_2 , A=1 , A=[8, 5, 4, 2] , A="weight_norm" , A=7 , A=7 , A=3 , A=2 , A=True , A="reflect" , A=2 , A=2 , A=1.0 , A=1_0_2_4 , A=None , A=True , **A , ) -> List[Any]: snake_case : Optional[Any] = target_bandwidths snake_case : Union[str, Any] = sampling_rate snake_case : Dict = audio_channels snake_case : str = normalize snake_case : List[Any] = chunk_length_s snake_case : Any = overlap snake_case : Optional[int] = hidden_size snake_case : Union[str, Any] = num_filters snake_case : Dict = num_residual_layers snake_case : str = upsampling_ratios snake_case : str = norm_type snake_case : int = kernel_size snake_case : Optional[Any] = last_kernel_size snake_case : str = residual_kernel_size snake_case : str = dilation_growth_rate snake_case : Optional[int] = use_causal_conv snake_case : Optional[Any] = pad_mode snake_case : Optional[int] = compress snake_case : Any = num_lstm_layers snake_case : List[str] = trim_right_ratio snake_case : int = codebook_size snake_case : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size snake_case : List[Any] = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" ) super().__init__(**snake_case_ ) @property def UpperCAmelCase ( self ) -> str: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def UpperCAmelCase ( self ) -> int: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def UpperCAmelCase ( self ) -> Optional[int]: snake_case : List[str] = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def UpperCAmelCase ( self ) -> int: return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
124
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    # Name restored from the references below; the flattened row assigned to a
    # placeholder identifier instead.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def UpperCAmelCase_(__lowercase: str) -> str:
    '''simple docstring'''
    # Keep the substitution result; the flattened row discarded it, so the
    # pegasus "<n>" newline marker was never actually removed.
    __lowercase = re.sub("<n>", "", __lowercase)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__lowercase))
22
0
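For reference, a worked example of the derived properties on the EnCodec configuration in the record above, assuming the 24 kHz defaults its __init__ shows:

import math

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = math.prod(upsampling_ratios)           # 320 samples per codec frame (the config uses np.prod)
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))  # 32 codebooks at 24 kbps
print(hop_length, frame_rate, num_quantizers)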
import pytest

from datasets import inspect_metric, list_metrics, load_metric


# Fixture and test names below are plausible reconstructions; the flattened row
# replaced them (and the `monkeypatch` parameters its bodies clearly rely on)
# with placeholder identifiers.
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]

        # Method name inferred from the huggingface_hub function this mock stands in for.
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
    # The warning class is an assumption (datasets deprecations emit FutureWarning).
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate'''):
        func(*args)
273
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A_ : def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : List[Any] ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) 
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) comm_check_on_output(snake_case_ ) _UpperCAmelCase = model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowerCamelCase : Tuple = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = False def lowercase ( self : Optional[int] ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def lowercase ( self : Any ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def lowercase ( self : List[str] ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def lowercase ( self : 
List[str] ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def lowercase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowercase ( self : Any ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def lowercase ( self : Optional[int] ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ), "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ), "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def lowercase ( self : int ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE :Dict = 1e-4 def UpperCAmelCase_ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class A_ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def lowercase ( self : List[Any] ): _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( 
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : int ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : List[Any] ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , ) _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ ) _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]] _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]] with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
22
0
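For reference, a minimal inference sketch matching the MaskFormer integration tests in the record above; it assumes the public checkpoint and the COCO fixture image those tests load:

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

ckpt = "facebook/maskformer-swin-small-coco"
processor = MaskFormerImageProcessor.from_pretrained(ckpt)
model = MaskFormerForInstanceSegmentation.from_pretrained(ckpt).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Per-query mask logits at 1/4 of the padded input resolution, and per-query
# class logits with one extra "no object" class, as asserted above.
print(outputs.masks_queries_logits.shape)  # (1, num_queries, H / 4, W / 4)
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)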
import math


# The flattened row gave both functions the same placeholder name, so the second
# definition shadowed the first; the names below are plausible reconstructions,
# not the original identifiers.
def perfect_square(num: int) -> bool:
    """simple docstring"""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """simple docstring"""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
312
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before
        # it and is included in the raw text; there should be a match in a
        # non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
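# Minimal usage sketch (not part of the original file): the public
# transformers API exposes this class as AlbertTokenizerFast; the checkpoint
# name is illustrative and downloading it requires network access.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("A quick test sentence.")
print(encoded["input_ids"])  # [CLS] ... [SEP] token ids, model-dependent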
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Search ``key`` in ``list_data`` by comparing both ends and recursing inward.

    Returns the index of ``key``, or -1 if it is not present.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
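# Minimal usage sketch (not part of the original file) for the two-ended
# recursive search above.
data = [7, 2, 9, 4, 1]
assert search(data, 9) == 2
assert search(data, 8) == -1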
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :int = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """perceiver""" def __init__( self : Any , snake_case_ : List[Any]=2_5_6 , snake_case_ : str=1_2_8_0 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : int=1 , snake_case_ : List[Any]=2_6 , snake_case_ : Dict=8 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Tuple=None , snake_case_ : Any="kv" , snake_case_ : Any=1 , snake_case_ : List[str]=1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Dict=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : List[str]=True , snake_case_ : str=2_6_2 , snake_case_ : Optional[Any]=2_0_4_8 , snake_case_ : Union[str, Any]=5_6 , snake_case_ : Dict=[3_6_8, 4_9_6] , snake_case_ : Tuple=1_6 , snake_case_ : Union[str, Any]=1_9_2_0 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) _UpperCAmelCase = num_latents _UpperCAmelCase = d_latents _UpperCAmelCase = d_model _UpperCAmelCase = num_blocks _UpperCAmelCase = num_self_attends_per_block _UpperCAmelCase = num_self_attention_heads _UpperCAmelCase = num_cross_attention_heads _UpperCAmelCase = qk_channels _UpperCAmelCase = v_channels _UpperCAmelCase = cross_attention_shape_for_attention _UpperCAmelCase = self_attention_widening_factor _UpperCAmelCase = cross_attention_widening_factor _UpperCAmelCase = hidden_act _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_query_residual # masked language modeling attributes _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings # image classification attributes _UpperCAmelCase = image_size # flow attributes _UpperCAmelCase = train_size # multimodal autoencoding attributes _UpperCAmelCase = num_frames _UpperCAmelCase = audio_samples_per_frame _UpperCAmelCase = samples_per_patch _UpperCAmelCase = output_shape class A_ ( lowerCAmelCase_ ): @property def lowercase ( self : int ): if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def lowercase ( self : Optional[Any] ): return 1e-4 def lowercase ( self : List[str] , snake_case_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , snake_case_ : int = 3 , snake_case_ : int = 4_0 , snake_case_ : int = 4_0 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(snake_case_ , snake_case_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples 
to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = preprocessor.num_special_tokens_to_add(snake_case_ ) _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [" ".join(["a"] ) * seq_length] * batch_size _UpperCAmelCase = dict(preprocessor(snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("input_ids" ) return inputs elif isinstance(snake_case_ , snake_case_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension(snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch ) _UpperCAmelCase = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _UpperCAmelCase = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
"""simple docstring""" import argparse import json from tqdm import tqdm def lowercase__( ): lowercase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--src_path' , type=__lowercase , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , ) parser.add_argument( '--evaluation_set' , type=__lowercase , help='where to store parsed evaluation_set file' , ) parser.add_argument( '--gold_data_path' , type=__lowercase , help='where to store parsed gold_data_path file' , ) lowercase_ : Optional[Any] = parser.parse_args() with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open( args.gold_data_path , 'w' ) as gold_file: lowercase_ : Union[str, Any] = json.load(__lowercase ) for dpr_record in tqdm(__lowercase ): lowercase_ : int = dpr_record['question'] lowercase_ : int = [context['title'] for context in dpr_record['positive_ctxs']] eval_file.write(question + '\n' ) gold_file.write('\t'.join(__lowercase ) + '\n' ) if __name__ == "__main__": main()
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
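# Minimal usage sketch (not part of the original file); requires scikit-learn
# and scipy to be installed, and emits the deprecation warning above.
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("sst-2", preds, labels))  # acc == 0.75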
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __A ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def a__ (self , A=0 ) -> Tuple: """simple docstring""" _a = np.random.RandomState(snake_case_ ) _a = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def a__ (self ) -> Any: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> List[str]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _a = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> Tuple: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> Optional[Any]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = 
image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) _a = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = 3 * [inputs['''prompt''']] # forward _a = pipe(**snake_case_ ) _a = output.images[0, -3:, -3:, -1] _a = self.get_dummy_inputs() _a = 3 * [inputs.pop('''prompt''' )] _a = pipe.tokenizer( snake_case_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors='''np''' , ) _a = text_inputs['''input_ids'''] _a = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] _a = prompt_embeds # forward _a = pipe(**snake_case_ ) _a = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = 3 * ['''this is a negative prompt'''] _a = negative_prompt _a = 3 * [inputs['''prompt''']] # forward _a = pipe(**snake_case_ ) _a = output.images[0, -3:, -3:, -1] _a = self.get_dummy_inputs() _a = 3 * [inputs.pop('''prompt''' )] _a = [] for p in [prompt, negative_prompt]: _a = pipe.tokenizer( snake_case_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors='''np''' , ) _a = text_inputs['''input_ids'''] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) _a , _a = embeds # forward _a = pipe(**snake_case_ ) _a = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' @property def a__ (self ) -> str: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a__ (self ) -> Optional[Any]: """simple docstring""" _a = ort.SessionOptions() _a = False return options def a__ (self ) -> Dict: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = '''A painting of a squirrel eating a burger''' np.random.seed(0 ) _a = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' ) _a = output.images 
_a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ (self ) -> Any: """simple docstring""" _a = DDIMScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) _a = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = '''open neural network exchange''' _a = np.random.RandomState(0 ) _a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ (self ) -> Any: """simple docstring""" _a = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) _a = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = '''open neural network exchange''' _a = np.random.RandomState(0 ) _a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def a__ (self ) -> int: """simple docstring""" _a = 0 def test_callback_fn(A , A , A ) -> None: _a = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) _a = latents[0, -3:, -3:, -1] _a = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) _a = latents[0, -3:, -3:, -1] _a = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 _a = False _a = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = '''Andromeda galaxy in a bottle''' _a = np.random.RandomState(0 ) pipe( prompt=snake_case_ , num_inference_steps=5 , guidance_scale=7.5 , generator=snake_case_ , callback=snake_case_ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def a__ (self ) -> List[str]: """simple docstring""" _a = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , 
sess_options=self.gpu_options , ) assert isinstance(snake_case_ , snake_case_ ) assert pipe.safety_checker is None _a = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(snake_case_ ) _a = OnnxStableDiffusionPipeline.from_pretrained(snake_case_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _a = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model configuration
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
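# Illustrative invocation (not part of the original file; the script name and
# all paths are hypothetical):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output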
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Optional[Any] = logging.get_logger(__name__) __snake_case : str = { '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class A__ ( lowerCAmelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """pegasus""" SCREAMING_SNAKE_CASE = ["""past_key_values"""] SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=5_0265 , _SCREAMING_SNAKE_CASE: str=1024 , _SCREAMING_SNAKE_CASE: Any=12 , _SCREAMING_SNAKE_CASE: Optional[int]=4096 , _SCREAMING_SNAKE_CASE: List[Any]=16 , _SCREAMING_SNAKE_CASE: Tuple=12 , _SCREAMING_SNAKE_CASE: Optional[int]=4096 , _SCREAMING_SNAKE_CASE: Any=16 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: List[Any]="gelu" , _SCREAMING_SNAKE_CASE: Dict=1024 , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: Optional[int]=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.02 , _SCREAMING_SNAKE_CASE: Optional[int]=0 , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Dict=0 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: List[str]=1 , **_SCREAMING_SNAKE_CASE: str , ) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[int] = vocab_size __lowerCAmelCase : str = max_position_embeddings __lowerCAmelCase : List[str] = d_model __lowerCAmelCase : Optional[Any] = encoder_ffn_dim __lowerCAmelCase : List[str] = encoder_layers __lowerCAmelCase : str = encoder_attention_heads __lowerCAmelCase : List[Any] = decoder_ffn_dim __lowerCAmelCase : Tuple = decoder_layers __lowerCAmelCase : str = decoder_attention_heads __lowerCAmelCase : List[str] = dropout __lowerCAmelCase : List[Any] = attention_dropout __lowerCAmelCase : Tuple = activation_dropout __lowerCAmelCase : List[str] = activation_function __lowerCAmelCase : List[Any] = init_std __lowerCAmelCase : Union[str, Any] = encoder_layerdrop __lowerCAmelCase : Union[str, Any] = decoder_layerdrop __lowerCAmelCase : Optional[int] = use_cache __lowerCAmelCase : Optional[Any] = encoder_layers __lowerCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , ) @property def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]: """simple docstring""" return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple: """simple docstring""" return self.d_model
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
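# Self-contained sketch (not part of the original file) of the
# inactivity-window arithmetic the script relies on; dates are illustrative.
from datetime import datetime, timedelta

now = datetime.utcnow()
updated_at = now - timedelta(days=10)
created_at = now - timedelta(days=45)

days_since_updated = (now - updated_at).days   # 10, past the 7-day close window
days_since_creation = (now - created_at).days  # 45, past the 30-day minimum age
print(days_since_updated > 7 and days_since_creation >= 30)  # True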
import requests


_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch the list of top articles as JSON
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
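# Illustrative response shape (not part of the original file): the JSON layout
# assumed by the loop above, with made-up headlines.
bbc_news_page = {"articles": [{"title": "Example headline"}, {"title": "Another headline"}]}
for i, article in enumerate(bbc_news_page["articles"], 1):
    print(f"{i}.) {article['title']}")
# 1.) Example headline
# 2.) Another headline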
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation,
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ): def __init__( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : int ): warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
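# Minimal usage sketch (not part of the original file): only words longer than
# four characters are reversed.
print(reverse_long_words("Hey wollef sroirraw"))  # -> "Hey fellow warriors"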
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase : int = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): UpperCAmelCase_ : Tuple ="""encoder-decoder""" UpperCAmelCase_ : Dict =True def __init__( self , **_lowerCamelCase ): super().__init__(**snake_case_ ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowercase = kwargs.pop('encoder' ) lowercase = encoder_config.pop('model_type' ) lowercase = kwargs.pop('decoder' ) lowercase = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig lowercase = AutoConfig.for_model(snake_case_ , **snake_case_ ) lowercase = AutoConfig.for_model(snake_case_ , **snake_case_ ) lowercase = True @classmethod def UpperCamelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) lowercase = True lowercase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **snake_case_ ) def UpperCamelCase_ ( self ): lowercase = copy.deepcopy(self.__dict__ ) lowercase = self.encoder.to_dict() lowercase = self.decoder.to_dict() lowercase = self.__class__.model_type return output
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, 1/3, ..., 1/n as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
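# Minimal usage sketch (not part of the original file): the first term renders
# as "1", the rest as "1/k".
print(harmonic_series("4"))  # ['1', '1/2', '1/3', '1/4']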
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
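# Minimal usage sketch (not part of the original file): a two-state chain
# walked for 100 steps; per-row probabilities sum to 1.
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
counts = get_transitions("a", transitions, steps=100)
print(counts)  # Counter of visits per node; exact values vary run to run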
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE :int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[str] = True def lowercase ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Tuple , snake_case_ : Any ): return ("This is a test", "This is a test") def lowercase ( self : Optional[int] ): _UpperCAmelCase = "</s>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(snake_case_ ) , 1_1_0_3 ) def lowercase ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def lowercase ( self : List[Any] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Tuple ): _UpperCAmelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions." _UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 _UpperCAmelCase = "To ensure a smooth flow of bank resolutions." 
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self : int ): _UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. @slow def lowercase ( self : Dict ): # fmt: off _UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class A_ ( 
lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : List[Any] = PegasusTokenizerFast _lowerCamelCase : int = True _lowerCamelCase : Union[str, Any] = True def lowercase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ , offset=0 , mask_token_sent=snake_case_ , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def lowercase ( self : Optional[Any] , **snake_case_ : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Union[str, Any] , snake_case_ : str ): return ("This is a test", "This is a test") def lowercase ( self : List[str] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) @require_torch def lowercase ( self : Tuple ): _UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) _UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids self.assertListEqual( snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        # Return the last hidden states of the underlying BERT encoder.
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        # Score each query token as the start/end of an entity by comparing it
        # against the start/end marker tokens of the support examples.
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class A_ ( unittest.TestCase ): def lowercase ( self : int ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) _UpperCAmelCase = BlipProcessor(snake_case_ , snake_case_ ) processor.save_pretrained(self.tmpdirname ) def lowercase ( self : Tuple , **snake_case_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer def lowercase ( self : Dict , **snake_case_ : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor def lowercase ( self : int ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : int ): _UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _UpperCAmelCase = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 ) _UpperCAmelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="np" ) _UpperCAmelCase = processor(images=snake_case_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = processor(text=snake_case_ ) _UpperCAmelCase = tokenizer(snake_case_ , return_token_type_ids=snake_case_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) 
# test if it raises when no input is passed with pytest.raises(snake_case_ ): processor() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(snake_case_ ) _UpperCAmelCase = tokenizer.batch_decode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : str ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
import datasets
from sklearn.metrics import mean_squared_error


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
    mse : mean squared error.
Examples:
    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}
    If you're using multi-dimensional lists, then set the config as follows :
    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
273
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase_ ( __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = image.size _UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = torch.from_numpy(__lowercase ) return 2.0 * image - 1.0 class A_ ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] else: raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' ) if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = preprocess(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _UpperCAmelCase = next(self.unet.parameters() ).dtype _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ ) _UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case_ , device=self.device ) _UpperCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for t in self.progress_bar(snake_case_ ): # concat latents and low resolution image in the channel dimension. 
_UpperCAmelCase = torch.cat([latents, image] , dim=1 ) _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample # decode the image latents with the VQVAE _UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample _UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
22
0
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
312
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
22
0
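A quick worked check of the tf-idf helpers above, sketched on a made-up three-document corpus (the numbers here are mine, not part of the snippet, but follow the same tf / df / idf definitions):

from math import log10

corpus = "the cat sat\nthe dog sat\nthe cat ran"
docs = corpus.split("\n")

term = "cat"
tf = docs[0].split(" ").count(term)             # term frequency in doc 0 -> 1
df = len([doc for doc in docs if term in doc])  # documents containing "cat" -> 2
idf = round(log10(len(docs) / df), 3)           # log10(3 / 2) -> 0.176
print(round(tf * idf, 3))                       # tf-idf for "cat" in doc 0 -> 0.176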
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
275
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
22
0
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[str] = git.Repo(search_parent_directories=__lowercase ) lowercase_ : List[Any] = { 'repo_id': str(__lowercase ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(__lowercase , 'git_log.json' ) , 'w' ) as f: json.dump(__lowercase , __lowercase , indent=4 ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): if params.n_gpu <= 0: lowercase_ : Dict = 0 lowercase_ : str = -1 lowercase_ : Optional[Any] = True lowercase_ : Optional[Any] = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase_ : List[Any] = int(os.environ['WORLD_SIZE'] ) lowercase_ : Tuple = int(os.environ['N_GPU_NODE'] ) lowercase_ : Dict = int(os.environ['RANK'] ) # number of nodes / node ID lowercase_ : Optional[int] = params.world_size // params.n_gpu_per_node lowercase_ : Optional[int] = params.global_rank // params.n_gpu_per_node lowercase_ : List[Any] = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase_ : Any = 1 lowercase_ : Union[str, Any] = 0 lowercase_ : Optional[Any] = 0 lowercase_ : List[Any] = 0 lowercase_ : List[str] = 1 lowercase_ : str = 1 lowercase_ : List[Any] = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase_ : Tuple = params.node_id == 0 and params.local_rank == 0 lowercase_ : Optional[int] = params.n_nodes > 1 # summary lowercase_ : Optional[int] = F'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
213
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
0
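To see why the `number &= number - 1` loop in the set-bit counter above runs once per set bit rather than once per bit position, here is a short illustrative trace on 13 (0b1101); each AND clears exactly the lowest set bit:

number = 0b1101  # 13, which has three set bits
steps = []
while number:
    steps.append(bin(number))
    number &= number - 1  # clears the lowest set bit
print(steps)  # ['0b1101', '0b1100', '0b1000'] -> the loop ran 3 times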
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
211
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''') class A_ ( Generic[T] ): def __init__( self : List[Any] , snake_case_ : list[T] , snake_case_ : Callable[[T, T], T] ): _UpperCAmelCase = None _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = [any_type for _ in range(self.N )] + arr _UpperCAmelCase = fnc self.build() def lowercase ( self : List[Any] ): for p in range(self.N - 1 , 0 , -1 ): _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : T ): p += self.N _UpperCAmelCase = v while p > 1: _UpperCAmelCase = p // 2 _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Any , snake_case_ : int , snake_case_ : int ): # noqa: E741 _UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N _UpperCAmelCase = None while l <= r: if l % 2 == 1: _UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] ) if r % 2 == 0: _UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] ) _UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __SCREAMING_SNAKE_CASE :List[str] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b) def UpperCAmelCase_ ( ) -> None: '''simple docstring''' for i in range(len(__lowercase ) ): for j in range(__lowercase , len(__lowercase ) ): _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__lowercase , __lowercase ) assert max_range == max_segment_tree.query(__lowercase , __lowercase ) assert sum_range == sum_segment_tree.query(__lowercase , __lowercase ) test_all_segments() for index, value in test_updates.items(): __SCREAMING_SNAKE_CASE :str = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
22
0
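A standalone cross-check of the `floyd_warshall` demo graph from the Graph class earlier in this record (my own sketch, not the class itself; unlike the class it also zeroes the diagonal, which does not change these two queries):

import math

edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10), (3, 1, 2),
         (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
n = 5
dist = [[math.inf] * n for _ in range(n)]
for i in range(n):
    dist[i][i] = 0
for u, v, w in edges:
    dist[u][v] = w
for k in range(n):  # same triple loop as Graph.floyd_warshall
    for i in range(n):
        for j in range(n):
            dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
print(dist[1][4], dist[0][3])  # 11 16: 1->3->4 (5+6) and 0->2->3 (9+7)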
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] = logging.get_logger(__name__) __snake_case : Dict = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class A__ ( lowerCAmelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """deta""" SCREAMING_SNAKE_CASE = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: int=900 , _SCREAMING_SNAKE_CASE: Tuple=2048 , _SCREAMING_SNAKE_CASE: Union[str, Any]=6 , _SCREAMING_SNAKE_CASE: Tuple=2048 , _SCREAMING_SNAKE_CASE: int=8 , _SCREAMING_SNAKE_CASE: str=6 , _SCREAMING_SNAKE_CASE: int=1024 , _SCREAMING_SNAKE_CASE: str=8 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: str="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=256 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: int=0.0 , _SCREAMING_SNAKE_CASE: Dict=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.02 , _SCREAMING_SNAKE_CASE: Optional[int]=1.0 , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: Any="sine" , _SCREAMING_SNAKE_CASE: int=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: List[Any]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=300 , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: Optional[int]=5 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: int=1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Dict=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=0.25 , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> Optional[Any]: """simple docstring""" if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") __lowerCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"]) else: if isinstance(snake_case_ , snake_case_): __lowerCAmelCase : Optional[Any] = backbone_config.pop("model_type") __lowerCAmelCase : Any = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase : Dict = config_class.from_dict(snake_case_) __lowerCAmelCase : List[Any] = backbone_config __lowerCAmelCase : Optional[int] = num_queries __lowerCAmelCase : Union[str, Any] = max_position_embeddings __lowerCAmelCase : List[Any] = d_model __lowerCAmelCase : Any = encoder_ffn_dim __lowerCAmelCase : List[str] = encoder_layers __lowerCAmelCase : List[Any] = encoder_attention_heads __lowerCAmelCase : Union[str, Any] = decoder_ffn_dim __lowerCAmelCase : Dict = decoder_layers __lowerCAmelCase : Union[str, Any] = decoder_attention_heads __lowerCAmelCase : Dict = dropout __lowerCAmelCase : Any = attention_dropout __lowerCAmelCase : Any = activation_dropout __lowerCAmelCase : Optional[int] = activation_function __lowerCAmelCase : Union[str, Any] = init_std __lowerCAmelCase : int = init_xavier_std __lowerCAmelCase : Optional[Any] = encoder_layerdrop __lowerCAmelCase : List[Any] = auxiliary_loss __lowerCAmelCase : Union[str, Any] = position_embedding_type # deformable attributes __lowerCAmelCase : Optional[int] = num_feature_levels __lowerCAmelCase : Optional[int] = encoder_n_points __lowerCAmelCase : Any = decoder_n_points __lowerCAmelCase : str = two_stage __lowerCAmelCase : Dict = two_stage_num_proposals __lowerCAmelCase : str = with_box_refine __lowerCAmelCase : Optional[Any] = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True.") # Hungarian matcher __lowerCAmelCase : Dict = class_cost __lowerCAmelCase : Optional[int] = bbox_cost __lowerCAmelCase : Union[str, Any] = giou_cost # Loss coefficients __lowerCAmelCase : Any = mask_loss_coefficient __lowerCAmelCase : int = dice_loss_coefficient __lowerCAmelCase : str = bbox_loss_coefficient __lowerCAmelCase : Tuple = giou_loss_coefficient __lowerCAmelCase : List[Any] = eos_coefficient __lowerCAmelCase : Any = focal_alpha super().__init__(is_encoder_decoder=snake_case_ , **snake_case_) @property def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]: """simple docstring""" return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[int]: """simple docstring""" return self.d_model def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : str = copy.deepcopy(self.__dict__) __lowerCAmelCase : str = self.backbone_config.to_dict() __lowerCAmelCase : List[Any] = self.__class__.model_type return output
269
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
22
0
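The expected values in the `_distribute_shards` tests above all follow one contiguous-partition rule. The sketch below is my own reimplementation of that rule for illustration only (not datasets' private function); it reproduces every parametrized case in the test:

def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    base, extra = divmod(num_shards, num_jobs)  # first `extra` jobs get one more shard
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        out.append(range(start, start + size))
        start += size
    return out

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]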
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
195
import math


def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
0
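One reason the perfect-square snippet above keeps both implementations: the float check can misreport very large inputs, while the binary search stays exact for integers. A small demonstration (the exact outcome of the float test can vary with platform rounding):

import math

big = (10**8 + 7) ** 2  # a genuine perfect square above 2**53
print(math.sqrt(big) * math.sqrt(big) == big)  # may be False: float rounding

left, right = 0, big  # exact integer binary search, as in the snippet
found = False
while left <= right:
    mid = (left + right) // 2
    if mid**2 == big:
        found = True
        break
    if mid**2 > big:
        right = mid - 1
    else:
        left = mid + 1
print(found)  # True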
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.json'''} a = { '''vocab_file''': { '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''', } } a = {'''mgp-str''': 2_7} class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]="[GO]" , lowerCAmelCase : Optional[Any]="[GO]" , lowerCAmelCase : Union[str, Any]="[s]" , lowerCAmelCase : Any="[GO]" , **lowerCAmelCase : Dict ): super().__init__( unk_token=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , pad_token=snake_case_ , **snake_case_ , ) with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase = json.load(snake_case_ ) lowerCAmelCase = {v: k for k, v in self.vocab.items()} @property def __lowercase ( self : str ): return len(self.vocab ) def __lowercase ( self : int ): return dict(self.vocab , **self.added_tokens_encoder ) def __lowercase ( self : Optional[Any] , lowerCAmelCase : int ): lowerCAmelCase = [] for s in text: char_tokens.extend(snake_case_ ) return char_tokens def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] ): return self.vocab.get(snake_case_ , self.vocab.get(self.unk_token ) ) def __lowercase ( self : int , lowerCAmelCase : int ): return self.decoder.get(snake_case_ ) def __lowercase ( self : int , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ): if not os.path.isdir(snake_case_ ): logger.error("""Vocabulary path ({}) should be a directory""".format(snake_case_ ) ) return lowerCAmelCase = os.path.join( snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" ) return (vocab_file,)
155
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __SCREAMING_SNAKE_CASE :Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ): _UpperCAmelCase = d_model _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length _UpperCAmelCase = cardinality _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence _UpperCAmelCase = embedding_dimension _UpperCAmelCase = is_training _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = context_length _UpperCAmelCase = prediction_length + label_length _UpperCAmelCase = label_length _UpperCAmelCase = moving_average _UpperCAmelCase = autocorrelation_factor def lowercase ( self : Union[str, Any] ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase ( self : int , snake_case_ : Optional[Any] ): _UpperCAmelCase = config.context_length + max(config.lags_sequence ) _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) _UpperCAmelCase = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def lowercase ( self : List[Any] ): _UpperCAmelCase = self.get_config() _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ ) return config, inputs_dict def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ): _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval() _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = outputs.encoder_last_hidden_state _UpperCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_encoder() encoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _UpperCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _UpperCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _UpperCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _UpperCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _UpperCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_decoder() decoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else () _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Tuple = False _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[Any] = False def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, 
Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ ) @unittest.skip(reason="Model has no tokens embeddings" ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : Optional[int] ): _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) ) # The main input is the name of the argument after `self` _UpperCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ ) _UpperCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_ ) # decoder attentions _UpperCAmelCase = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _UpperCAmelCase = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 2 , len(snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase ( self : Dict ): super().test_retain_grad_hidden_states_attentions() def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" ) _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) return batch @require_torch @slow class A_ ( unittest.TestCase ): def lowercase ( self : Optional[int] ): _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch() with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] _UpperCAmelCase = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , 
static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ ) _UpperCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
22
0
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class a ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_attention_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_choices def UpperCamelCase_ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_attention_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCamelCase_ ( self ): lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase = config_and_inputs lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class a ( lowerCAmelCase_, unittest.TestCase ): UpperCAmelCase_ : Tuple =True UpperCAmelCase_ : int =( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCamelCase_ ( self ): lowercase = FlaxRoFormerModelTester(self ) @slow def UpperCamelCase_ ( self ): for model_class_name in self.all_model_classes: lowercase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=snake_case_ ) lowercase = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case_ ) @require_flax class a ( unittest.TestCase ): @slow 
def UpperCamelCase_ ( self ): lowercase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowercase = model(snake_case_ )[0] lowercase = 5_0_0_0_0 lowercase = (1, 6, vocab_size) self.assertEqual(output.shape , snake_case_ ) lowercase = jnp.array( [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) )
220
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class A_ : _lowerCamelCase : str _lowerCamelCase : str = None @staticmethod def lowercase ( ): raise NotImplementedError def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ): raise NotImplementedError def lowercase ( self : Any , snake_case_ : int ): raise NotImplementedError def lowercase ( self : List[str] ): if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase ( cls : List[Any] ): return f'`pip install {cls.pip_package or cls.name}`' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """optuna""" @staticmethod def lowercase ( ): return is_optuna_available() def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ): return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : int , snake_case_ : Optional[int] ): return default_hp_space_optuna(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """ray""" _lowerCamelCase : Tuple = """'ray[tune]'""" @staticmethod def lowercase ( ): return is_ray_available() def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ): return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : str ): return default_hp_space_ray(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """sigopt""" @staticmethod def lowercase ( ): return is_sigopt_available() def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ): return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] ): return default_hp_space_sigopt(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """wandb""" @staticmethod def lowercase ( ): return is_wandb_available() def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ): return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : Union[str, Any] ): return default_hp_space_wandb(snake_case_ ) __SCREAMING_SNAKE_CASE :Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase_ ( ) -> str: '''simple docstring''' _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowercase ) > 0: _UpperCAmelCase = available_backends[0].name if len(__lowercase ) > 1: logger.info( f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
22
0
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
141
'''simple docstring'''
__SCREAMING_SNAKE_CASE :List[str] = '''0.18.2'''

from .configuration_utils import ConfigMixin
from .utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_inflect_available,
    is_invisible_watermark_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_librosa_available,
    is_note_seq_available,
    is_onnx_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
    is_transformers_available,
    is_transformers_version,
    is_unidecode_available,
    logging,
)


try:
    if not is_onnx_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipelines import OnnxRuntimeModel

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *  # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        TaFilmDecoder,
        TransformeraDModel,
        UNetaDModel,
        UNetaDConditionModel,
        UNetaDModel,
        UNetaDConditionModel,
        VQModel,
    )
    from .optimization import (
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
        get_scheduler,
    )
    from .pipelines import (
        AudioPipelineOutput,
        ConsistencyModelPipeline,
        DanceDiffusionPipeline,
        DDIMPipeline,
        DDPMPipeline,
        DiffusionPipeline,
        DiTPipeline,
        ImagePipelineOutput,
        KarrasVePipeline,
        LDMPipeline,
        LDMSuperResolutionPipeline,
        PNDMPipeline,
        RePaintPipeline,
        ScoreSdeVePipeline,
    )
    from .schedulers import (
        CMStochasticIterativeScheduler,
        DDIMInverseScheduler,
        DDIMParallelScheduler,
        DDIMScheduler,
        DDPMParallelScheduler,
        DDPMScheduler,
        DEISMultistepScheduler,
        DPMSolverMultistepInverseScheduler,
        DPMSolverMultistepScheduler,
        DPMSolverSinglestepScheduler,
        EulerAncestralDiscreteScheduler,
        EulerDiscreteScheduler,
        HeunDiscreteScheduler,
        IPNDMScheduler,
        KarrasVeScheduler,
        KDPMaAncestralDiscreteScheduler,
        KDPMaDiscreteScheduler,
        PNDMScheduler,
        RePaintScheduler,
        SchedulerMixin,
        ScoreSdeVeScheduler,
        UnCLIPScheduler,
        UniPCMultistepScheduler,
        VQDiffusionScheduler,
    )
    from .training_utils import EMAModel

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .schedulers import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .schedulers import DPMSolverSDEScheduler

try:
    if not (is_torch_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        AltDiffusionImgaImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImgaImgPipeline,
        IFImgaImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImgaImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyVaaControlnetImgaImgPipeline,
        KandinskyVaaControlnetPipeline,
        KandinskyVaaImgaImgPipeline,
        KandinskyVaaInpaintPipeline,
        KandinskyVaaPipeline,
        KandinskyVaaPriorEmbaEmbPipeline,
        KandinskyVaaPriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImgaImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImgaImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepthaImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImgaImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPixaPixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDMaDPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPixaPixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImgaImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )

try:
    if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionKDiffusionPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImgaImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )

try:
    if not (is_torch_available() and is_librosa_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
else:
    from .pipelines import AudioDiffusionPipeline, Mel

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .pipelines import SpectrogramDiffusionPipeline

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_objects import *  # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
    from .schedulers import (
        FlaxDDIMScheduler,
        FlaxDDPMScheduler,
        FlaxDPMSolverMultistepScheduler,
        FlaxKarrasVeScheduler,
        FlaxLMSDiscreteScheduler,
        FlaxPNDMScheduler,
        FlaxSchedulerMixin,
        FlaxScoreSdeVeScheduler,
    )

try:
    if not (is_flax_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImgaImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )

try:
    if not (is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_note_seq_objects import *  # noqa F403
else:
    from .pipelines import MidiProcessor
22
0
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
    snake_case : Dict = int(__lowercase )
    if n_element < 1:
        snake_case : int = ValueError("""a should be a positive number""" )
        raise my_error
    snake_case : Any = [1]
    snake_case , snake_case , snake_case : int = (0, 0, 0)
    snake_case : Union[str, Any] = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 ,hamming_list[j] * 3 ,hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    lowerCamelCase : Any = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    lowerCamelCase : Tuple = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
124
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    __SCREAMING_SNAKE_CASE :Optional[int] = True
except (ImportError, ModuleNotFoundError):
    __SCREAMING_SNAKE_CASE :str = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def UpperCAmelCase_ ( __lowercase : str ) -> str:
    '''simple docstring'''
    re.sub("<n>" , "" , __lowercase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__lowercase ) )
22
0
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

__A : Optional[int] = TypeVar("T")


class A_ (Generic[T] ):
    def __init__( self , _A , _A ):
        '''simple docstring'''
        UpperCAmelCase = None
        UpperCAmelCase = len(snake_case_ )
        UpperCAmelCase = [any_type for _ in range(self.N )] + arr
        UpperCAmelCase = fnc
        self.build()

    def _lowercase ( self ):
        '''simple docstring'''
        for p in range(self.N - 1 , 0 , -1 ):
            UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def _lowercase ( self , _A , _A ):
        '''simple docstring'''
        p += self.N
        UpperCAmelCase = v
        while p > 1:
            UpperCAmelCase = p // 2
            UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def _lowercase ( self , _A , _A ):  # noqa: E741
        '''simple docstring'''
        UpperCAmelCase , UpperCAmelCase = l + self.N, r + self.N
        UpperCAmelCase = None
        while l <= r:
            if l % 2 == 1:
                UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] )
            if r % 2 == 0:
                UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] )
            UpperCAmelCase , UpperCAmelCase = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

__A : Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__A : List[str] = {
    0: 7,
    1: 2,
    2: 6,
    3: -14,
    4: 5,
    5: 4,
    6: 7,
    7: -10,
    8: 9,
    9: 10,
    10: 12,
    11: 1,
}
__A : Any = SegmentTree(test_array, min)
__A : Any = SegmentTree(test_array, max)
__A : Any = SegmentTree(test_array, lambda a, b: a + b)


def __SCREAMING_SNAKE_CASE ( ) -> None:
    '''simple docstring'''
    for i in range(len(__lowercase ) ):
        for j in range(__lowercase , len(__lowercase ) ):
            UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] )
            UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] )
            UpperCAmelCase = reduce(lambda UpperCamelCase__ , UpperCamelCase__ : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(__lowercase , __lowercase )
            assert max_range == max_segment_tree.query(__lowercase , __lowercase )
            assert sum_range == sum_segment_tree.query(__lowercase , __lowercase )


test_all_segments()

for index, value in test_updates.items():
    __A : str = value
    min_segment_tree.update(index, value)
    max_segment_tree.update(index, value)
    sum_segment_tree.update(index, value)
    test_all_segments()
273
'''simple docstring'''
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class A_ :
    def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ):
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_auxiliary_loss
        _UpperCAmelCase = num_queries
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = min_size
        _UpperCAmelCase = max_size
        _UpperCAmelCase = num_labels
        _UpperCAmelCase = mask_feature_size

    def lowercase ( self : Union[str, Any] ):
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            snake_case_ )
        _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
        _UpperCAmelCase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
        ).float()
        _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase ( self : List[Any] ):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase ( self : Optional[Any] ):
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
        _UpperCAmelCase = output.encoder_hidden_states
        _UpperCAmelCase = output.pixel_decoder_hidden_states
        _UpperCAmelCase = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )

    def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ):
        with torch.no_grad():
            _UpperCAmelCase = MaskFormerModel(config=snake_case_ )
            model.to(snake_case_ )
            model.eval()
            _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
            _UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(snake_case_ , snake_case_ )

    def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ):
        _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()

        def comm_check_on_output(snake_case_ : int ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
            _UpperCAmelCase = model(snake_case_ )

            comm_check_on_output(snake_case_ )

        _UpperCAmelCase = model(
            pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )

        comm_check_on_output(snake_case_ )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _lowerCamelCase : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    _lowerCamelCase : Optional[Any] = False
    _lowerCamelCase : Dict = False
    _lowerCamelCase : Any = False
    _lowerCamelCase : List[Any] = False

    def lowercase ( self : Optional[int] ):
        _UpperCAmelCase = MaskFormerModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )

    def lowercase ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    def lowercase ( self : Union[str, Any] ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )

    def lowercase ( self : int ):
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def lowercase ( self : Any ):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def lowercase ( self : List[str] ):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def lowercase ( self : List[str] ):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def lowercase ( self : List[Any] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def lowercase ( self : Any ):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowercase ( self : Union[str, Any] ):
        pass

    def lowercase ( self : List[str] ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]

            _UpperCAmelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    @slow
    def lowercase ( self : Optional[int] ):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )

    def lowercase ( self : Optional[int] ):
        _UpperCAmelCase = (self.model_tester.min_size,) * 2
        _UpperCAmelCase = {
            "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
            "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
            "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
        }

        _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
        _UpperCAmelCase = model(**snake_case_ )
        self.assertTrue(outputs.loss is not None )

    def lowercase ( self : Dict ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )

    def lowercase ( self : Any ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
            _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
            self.assertTrue(outputs.attentions is not None )

    def lowercase ( self : int ):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _UpperCAmelCase = self.all_model_classes[1]
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()

        _UpperCAmelCase = model_class(snake_case_ )
        model.to(snake_case_ )
        model.train()

        _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
        loss.backward()

    def lowercase ( self : int ):
        # only MaskFormerForInstanceSegmentation has the loss
        _UpperCAmelCase = self.all_model_classes[1]
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        _UpperCAmelCase = True
        _UpperCAmelCase = True

        _UpperCAmelCase = model_class(snake_case_ )
        model.to(snake_case_ )
        model.train()

        _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )

        _UpperCAmelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        _UpperCAmelCase = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=snake_case_ )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


__SCREAMING_SNAKE_CASE :Dict = 1e-4


def UpperCAmelCase_ ( ) -> List[str]:
    '''simple docstring'''
    _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_vision
@slow
class A_ ( unittest.TestCase ):
    @cached_property
    def lowercase ( self : Dict ):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def lowercase ( self : List[Any] ):
        _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
        _UpperCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )

        _UpperCAmelCase = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )

        _UpperCAmelCase = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )

        _UpperCAmelCase = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )

    def lowercase ( self : Tuple ):
        _UpperCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(snake_case_ )
            .eval()
        )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
        _UpperCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )

        # masks_queries_logits
        _UpperCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _UpperCAmelCase = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )

        # class_queries_logits
        _UpperCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _UpperCAmelCase = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )

    def lowercase ( self : int ):
        _UpperCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(snake_case_ )
            .eval()
        )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
        _UpperCAmelCase = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )

        # masks_queries_logits
        _UpperCAmelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )

        # class_queries_logits
        _UpperCAmelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _UpperCAmelCase = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )

    def lowercase ( self : List[Any] ):
        _UpperCAmelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(snake_case_ )
            .eval()
        )
        _UpperCAmelCase = self.default_image_processor

        _UpperCAmelCase = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )

        _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ )
        _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
        _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]]

        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )

        self.assertTrue(outputs.loss is not None )
22
0
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class _a ( lowerCAmelCase_ ):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def __A ( UpperCAmelCase : ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def __A ( self : Optional[Any] ):
        raise NotImplementedError()
312
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    __SCREAMING_SNAKE_CASE :List[Any] = None

__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

__SCREAMING_SNAKE_CASE :List[Any] = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
    },
}

__SCREAMING_SNAKE_CASE :Optional[Any] = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}

__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''


class A_ ( lowerCAmelCase_ ):
    _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
    _lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase : int = AlbertTokenizer

    def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        _UpperCAmelCase = (
            AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
            if isinstance(snake_case_ , snake_case_ )
            else mask_token
        )

        super().__init__(
            snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )

        _UpperCAmelCase = do_lower_case
        _UpperCAmelCase = remove_space
        _UpperCAmelCase = keep_accents
        _UpperCAmelCase = vocab_file
        _UpperCAmelCase = False if not self.vocab_file else True

    def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        _UpperCAmelCase = [self.sep_token_id]
        _UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        _UpperCAmelCase = [self.sep_token_id]
        _UpperCAmelCase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(snake_case_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return

        _UpperCAmelCase = os.path.join(
            snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
            copyfile(self.vocab_file , snake_case_ )

        return (out_vocab_file,)
22
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_UpperCamelCase = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE :int = {
    '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class A_ ( lowerCAmelCase_ ):
    _lowerCamelCase : int = """perceiver"""

    def __init__( self : Any , snake_case_ : List[Any]=2_5_6 , snake_case_ : str=1_2_8_0 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : int=1 , snake_case_ : List[Any]=2_6 , snake_case_ : Dict=8 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Tuple=None , snake_case_ : Any="kv" , snake_case_ : Any=1 , snake_case_ : List[str]=1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Dict=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : List[str]=True , snake_case_ : str=2_6_2 , snake_case_ : Optional[Any]=2_0_4_8 , snake_case_ : Union[str, Any]=5_6 , snake_case_ : Dict=[3_6_8, 4_9_6] , snake_case_ : Tuple=1_6 , snake_case_ : Union[str, Any]=1_9_2_0 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **snake_case_ : List[Any] , ):
        super().__init__(**snake_case_ )

        _UpperCAmelCase = num_latents
        _UpperCAmelCase = d_latents
        _UpperCAmelCase = d_model
        _UpperCAmelCase = num_blocks
        _UpperCAmelCase = num_self_attends_per_block
        _UpperCAmelCase = num_self_attention_heads
        _UpperCAmelCase = num_cross_attention_heads
        _UpperCAmelCase = qk_channels
        _UpperCAmelCase = v_channels
        _UpperCAmelCase = cross_attention_shape_for_attention
        _UpperCAmelCase = self_attention_widening_factor
        _UpperCAmelCase = cross_attention_widening_factor
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = layer_norm_eps
        _UpperCAmelCase = use_query_residual
        # masked language modeling attributes
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = max_position_embeddings
        # image classification attributes
        _UpperCAmelCase = image_size
        # flow attributes
        _UpperCAmelCase = train_size
        # multimodal autoencoding attributes
        _UpperCAmelCase = num_frames
        _UpperCAmelCase = audio_samples_per_frame
        _UpperCAmelCase = samples_per_patch
        _UpperCAmelCase = output_shape


class A_ ( lowerCAmelCase_ ):
    @property
    def lowercase ( self : int ):
        if self.task == "multiple-choice":
            _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            _UpperCAmelCase = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )

    @property
    def lowercase ( self : Optional[Any] ):
        return 1e-4

    def lowercase ( self : List[str] , snake_case_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , snake_case_ : int = 3 , snake_case_ : int = 4_0 , snake_case_ : int = 4_0 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(snake_case_ , snake_case_ ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            _UpperCAmelCase = compute_effective_axis_dimension(
                snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            _UpperCAmelCase = preprocessor.num_special_tokens_to_add(snake_case_ )
            _UpperCAmelCase = compute_effective_axis_dimension(
                snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
            # Generate dummy inputs according to compute batch and sequence
            _UpperCAmelCase = [" ".join(["a"] ) * seq_length] * batch_size
            _UpperCAmelCase = dict(preprocessor(snake_case_ , return_tensors=snake_case_ ) )
            _UpperCAmelCase = inputs.pop("input_ids" )
            return inputs
        elif isinstance(snake_case_ , snake_case_ ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            _UpperCAmelCase = compute_effective_axis_dimension(snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch )
            _UpperCAmelCase = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            _UpperCAmelCase = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) )
            _UpperCAmelCase = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
22
0
"""simple docstring""" __SCREAMING_SNAKE_CASE =[ '''VerificationMode''', '''Version''', '''disable_progress_bar''', '''enable_progress_bar''', '''is_progress_bar_enabled''', '''experimental''', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
213
'''simple docstring'''
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import fa_score, matthews_corrcoef


__SCREAMING_SNAKE_CASE :List[str] = (
    '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
    '''library. You can have a look at this example script for pointers: '''
    '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)


def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Tuple ) -> int:
    '''simple docstring'''
    warnings.warn(__lowercase , __lowercase )
    requires_backends(__lowercase , "sklearn" )
    return (preds == labels).mean()


def UpperCAmelCase_ ( __lowercase : int , __lowercase : str ) -> Optional[Any]:
    '''simple docstring'''
    warnings.warn(__lowercase , __lowercase )
    requires_backends(__lowercase , "sklearn" )
    _UpperCAmelCase = simple_accuracy(__lowercase , __lowercase )
    _UpperCAmelCase = fa_score(y_true=__lowercase , y_pred=__lowercase )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }


def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : List[str] ) -> List[Any]:
    '''simple docstring'''
    warnings.warn(__lowercase , __lowercase )
    requires_backends(__lowercase , "sklearn" )
    _UpperCAmelCase = pearsonr(__lowercase , __lowercase )[0]
    _UpperCAmelCase = spearmanr(__lowercase , __lowercase )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : str ) -> Tuple:
    '''simple docstring'''
    warnings.warn(__lowercase , __lowercase )
    requires_backends(__lowercase , "sklearn" )
    assert len(__lowercase ) == len(__lowercase ), f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(__lowercase , __lowercase )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "mrpc":
        return acc_and_fa(__lowercase , __lowercase )
    elif task_name == "sts-b":
        return pearson_and_spearman(__lowercase , __lowercase )
    elif task_name == "qqp":
        return acc_and_fa(__lowercase , __lowercase )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    else:
        raise KeyError(__lowercase )


def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : str ) -> Union[str, Any]:
    '''simple docstring'''
    warnings.warn(__lowercase , __lowercase )
    requires_backends(__lowercase , "sklearn" )
    if len(__lowercase ) != len(__lowercase ):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(__lowercase , __lowercase )}
    else:
        raise KeyError(__lowercase )
22
0
'''simple docstring'''
from __future__ import annotations

from statistics import mean


def lowerCAmelCase (__A , __A , __A):
    """simple docstring"""
    _a = [0] * no_of_processes
    _a = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.

    for i in range(__lowercase):
        _a = burst_time[i]
    _a = []
    _a = 0
    _a = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        _a = []
        _a = -1
        for i in range(__lowercase):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(__lowercase)

        if len(__lowercase) > 0:
            _a = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    _a = i
            total_time += burst_time[target_process]
            completed += 1
            _a = 0
            _a = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def lowerCAmelCase (__A , __A , __A):
    """simple docstring"""
    _a = [0] * no_of_processes
    for i in range(__lowercase):
        _a = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    lowercase_ = 4
    lowercase_ = [2, 5, 3, 7]
    lowercase_ = [0, 0, 0, 0]
    lowercase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    lowercase_ = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
211
'''simple docstring'''
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]:
    '''simple docstring'''
    _UpperCAmelCase = TapasConfig.from_json_file(__lowercase )
    # set absolute/relative position embeddings parameter
    _UpperCAmelCase = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
    elif task == "WTQ":
        # run_task_main.py hparams
        _UpperCAmelCase = 4
        _UpperCAmelCase = True
        # hparam_utils.py hparams
        _UpperCAmelCase = 0.66_4694
        _UpperCAmelCase = 0.20_7951
        _UpperCAmelCase = 0.12_1194
        _UpperCAmelCase = True
        _UpperCAmelCase = True
        _UpperCAmelCase = False
        _UpperCAmelCase = 0.035_2513
        _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        _UpperCAmelCase = 4
        _UpperCAmelCase = False
        # hparam_utils.py hparams
        _UpperCAmelCase = 36.4519
        _UpperCAmelCase = 0.90_3421
        _UpperCAmelCase = 222.088
        _UpperCAmelCase = True
        _UpperCAmelCase = True
        _UpperCAmelCase = True
        _UpperCAmelCase = 0.76_3141
        _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
    elif task == "TABFACT":
        _UpperCAmelCase = TapasForSequenceClassification(config=__lowercase )
    elif task == "MLM":
        _UpperCAmelCase = TapasForMaskedLM(config=__lowercase )
    elif task == "INTERMEDIATE_PRETRAINING":
        _UpperCAmelCase = TapasModel(config=__lowercase )
    else:
        raise ValueError(f'Task {task} not supported.' )

    print(f'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase )

    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(__lowercase )

    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}' )
    _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(__lowercase )

    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
    )
    parser.add_argument(
        '''--reset_position_index_per_cell''',
        default=False,
        action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to True.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--tapas_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained TAPAS model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
22
0
"""simple docstring""" import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A__ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = MobileBertTokenizer SCREAMING_SNAKE_CASE = MobileBertTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = filter_non_english SCREAMING_SNAKE_CASE = """google/mobilebert-uncased""" def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Any: """simple docstring""" super().setUp() __lowerCAmelCase : str = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) __lowerCAmelCase : str = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = "UNwant\u00E9d,running" __lowerCAmelCase : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any: """simple docstring""" __lowerCAmelCase : Optional[int] = self.tokenizer_class(self.vocab_file) __lowerCAmelCase : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_) , [9, 6, 7, 12, 10, 11]) def _SCREAMING_SNAKE_CASE ( self: str) -> Union[str, Any]: """simple docstring""" if not self.test_rust_tokenizer: return __lowerCAmelCase : Any = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : Optional[int] = "UNwant\u00E9d,running" __lowerCAmelCase : Any = tokenizer.tokenize(snake_case_) __lowerCAmelCase : Any = rust_tokenizer.tokenize(snake_case_) self.assertListEqual(snake_case_ , snake_case_) __lowerCAmelCase : List[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : Any = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_) self.assertListEqual(snake_case_ , snake_case_) __lowerCAmelCase : List[Any] = self.get_rust_tokenizer() __lowerCAmelCase : Optional[int] = tokenizer.encode(snake_case_) __lowerCAmelCase : str = rust_tokenizer.encode(snake_case_) self.assertListEqual(snake_case_ , snake_case_) # With lower casing __lowerCAmelCase : Dict = self.get_tokenizer(do_lower_case=snake_case_) __lowerCAmelCase : str = self.get_rust_tokenizer(do_lower_case=snake_case_) __lowerCAmelCase : List[Any] = "UNwant\u00E9d,running" __lowerCAmelCase : List[Any] = tokenizer.tokenize(snake_case_) __lowerCAmelCase : str = rust_tokenizer.tokenize(snake_case_) self.assertListEqual(snake_case_ , snake_case_) __lowerCAmelCase : Union[str, Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(snake_case_ , 
add_special_tokens=snake_case_) self.assertListEqual(snake_case_ , snake_case_) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : Dict = tokenizer.encode(snake_case_) __lowerCAmelCase : Tuple = rust_tokenizer.encode(snake_case_) self.assertListEqual(snake_case_ , snake_case_) def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple: """simple docstring""" __lowerCAmelCase : List[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"]) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]: """simple docstring""" __lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : int = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"]) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int: """simple docstring""" __lowerCAmelCase : Any = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def _SCREAMING_SNAKE_CASE ( self: str) -> int: """simple docstring""" __lowerCAmelCase : List[Any] = BasicTokenizer(do_lower_case=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def _SCREAMING_SNAKE_CASE ( self: Dict) -> int: """simple docstring""" __lowerCAmelCase : Dict = BasicTokenizer(do_lower_case=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"]) def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str: """simple docstring""" __lowerCAmelCase : int = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"]) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"]) def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any: """simple docstring""" __lowerCAmelCase : int = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __lowerCAmelCase : Optional[Any] = {} for i, token in enumerate(snake_case_): __lowerCAmelCase : List[Any] = i __lowerCAmelCase : Dict = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize("") , []) self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"]) def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]: """simple docstring""" self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]: """simple docstring""" self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def _SCREAMING_SNAKE_CASE ( self: str) -> List[str]: """simple docstring""" self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict: """simple docstring""" __lowerCAmelCase : Optional[Any] = self.get_tokenizer() __lowerCAmelCase : Tuple = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(snake_case_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]]) self.assertListEqual( [rust_tokenizer.tokenize(snake_case_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]]) @slow def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict: """simple docstring""" __lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("google/mobilebert-uncased") __lowerCAmelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_) __lowerCAmelCase : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_) __lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(snake_case_) __lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Tuple: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_) __lowerCAmelCase : List[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus( snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , return_offsets_mapping=snake_case_ , add_special_tokens=snake_case_ , ) __lowerCAmelCase : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(snake_case_ , "do_lower_case") 
else False __lowerCAmelCase : Any = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"]) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Tuple = ["的", "人", "有"] __lowerCAmelCase : Any = "".join(snake_case_) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __lowerCAmelCase : List[str] = True __lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_) __lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_) __lowerCAmelCase : str = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : int = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : Any = tokenizer_r.convert_ids_to_tokens(snake_case_) __lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(snake_case_) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(snake_case_ , snake_case_) self.assertListEqual(snake_case_ , snake_case_) __lowerCAmelCase : Any = False __lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_) __lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_) __lowerCAmelCase : Optional[Any] = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : Tuple = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_) __lowerCAmelCase : Any = tokenizer_r.convert_ids_to_tokens(snake_case_) __lowerCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(snake_case_) # it is expected that only the first Chinese character is not preceded by "##". __lowerCAmelCase : str = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case_) ] self.assertListEqual(snake_case_ , snake_case_) self.assertListEqual(snake_case_ , snake_case_)
269
'''simple docstring'''
import os
from datetime import datetime as dt

from github import Github


__SCREAMING_SNAKE_CASE :str = [
    '''good first issue''',
    '''feature request''',
    '''wip''',
]


def UpperCAmelCase_ ( ) -> Optional[Any]:
    '''simple docstring'''
    _UpperCAmelCase = Github(os.environ["GITHUB_TOKEN"] )
    _UpperCAmelCase = g.get_repo("huggingface/accelerate" )
    _UpperCAmelCase = repo.get_issues(state="open" )

    for issue in open_issues:
        _UpperCAmelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowercase : i.created_at , reverse=__lowercase )
        _UpperCAmelCase = comments[0] if len(__lowercase ) > 0 else None
        _UpperCAmelCase = dt.utcnow()
        _UpperCAmelCase = (current_time - issue.updated_at).days
        _UpperCAmelCase = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed" )
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored." )


if __name__ == "__main__":
    main()
22
0
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
195
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files" , [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ] , ) def UpperCAmelCase_ ( __lowercase : Any , __lowercase : int ) -> int: '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info" , [ DatasetInfo(), DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ), ] , ) def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : DatasetInfo ) -> Any: '''simple docstring''' _UpperCAmelCase = str(__lowercase ) dataset_info.write_to_directory(__lowercase ) _UpperCAmelCase = DatasetInfo.from_directory(__lowercase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowercase , "dataset_info.json" ) ) def UpperCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = DatasetInfo( description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCAmelCase = dataset_info._to_yaml_dict() assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCAmelCase = yaml.safe_dump(__lowercase ) _UpperCAmelCase = yaml.safe_load(__lowercase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = DatasetInfo() _UpperCAmelCase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict" , [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase_ ( __lowercase : int , __lowercase : DatasetInfosDict ) -> Dict: '''simple docstring''' _UpperCAmelCase = str(__lowercase ) dataset_infos_dict.write_to_directory(__lowercase ) _UpperCAmelCase = 
DatasetInfosDict.from_directory(__lowercase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowercase , "README.md" ) )
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''OwlViTFeatureExtractor'''] a = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
155
'''simple docstring'''


def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
22
0
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class a ( lowerCAmelCase_ ): @require_torch def UpperCamelCase_ ( self ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n ' lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache lowercase = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(snake_case_ ) BertModel.from_pretrained(snake_case_ ) BertTokenizer.from_pretrained(snake_case_ ) pipeline(task='fill-mask' , model=snake_case_ ) # baseline - just load from_pretrained with normal network lowercase = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed lowercase = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase = '1' lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self ): # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n ' lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache lowercase = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(snake_case_ ) BertModel.from_pretrained(snake_case_ ) BertTokenizer.from_pretrained(snake_case_ ) pipeline(task='fill-mask' , model=snake_case_ ) # baseline - just load from_pretrained with normal network lowercase = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed lowercase = self.get_env() lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowercase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' lowercase = 
'\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n ' lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network lowercase = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed lowercase = self.get_env() lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # next emulate no network lowercase = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase = '1' lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def UpperCamelCase_ ( self ): lowercase = '\nfrom transformers import pipeline\n ' lowercase = '\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n ' lowercase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n ' lowercase = self.get_env() lowercase = '1' lowercase = [sys.executable, '-c', '\n'.join([load, mock, run] )] lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , ) @require_torch def UpperCamelCase_ ( self ): lowercase = '\nfrom transformers import AutoModel\n ' lowercase = '\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n ' # baseline - just load from_pretrained with normal network lowercase = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed lowercase = self.get_env() lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase = '1' lowercase = subprocess.run(snake_case_ , env=snake_case_ , check=snake_case_ , capture_output=snake_case_ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() )
220
'''simple docstring'''


def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
22
0
'''simple docstring'''
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    '''simple docstring'''
    return compare_versions(torch_version, operation, version)
141
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE :int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[str] = True def lowercase ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Tuple , snake_case_ : Any ): return ("This is a test", "This is a test") def lowercase ( self : Optional[int] ): _UpperCAmelCase = "</s>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(snake_case_ ) , 1_1_0_3 ) def lowercase ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def lowercase ( self : List[Any] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Tuple ): _UpperCAmelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions." _UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 _UpperCAmelCase = "To ensure a smooth flow of bank resolutions." 
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self : int ): _UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. @slow def lowercase ( self : Dict ): # fmt: off _UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class A_ ( 
lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : List[Any] = PegasusTokenizerFast _lowerCamelCase : int = True _lowerCamelCase : Union[str, Any] = True def lowercase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ , offset=0 , mask_token_sent=snake_case_ , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def lowercase ( self : Optional[Any] , **snake_case_ : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Union[str, Any] , snake_case_ : str ): return ("This is a test", "This is a test") def lowercase ( self : List[str] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) @require_torch def lowercase ( self : Tuple ): _UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) _UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids self.assertListEqual( snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
22
0
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCamelCase : int = TypeVar('KEY') lowerCamelCase : List[Any] = TypeVar('VAL') @dataclass(frozen=lowerCAmelCase_ , slots=lowerCAmelCase_ ) class __lowercase (Generic[KEY, VAL] ): """simple docstring""" _snake_case = 42 _snake_case = 42 class __lowercase (_Item ): """simple docstring""" def __init__( self ) -> Tuple: super().__init__(snake_case_ , snake_case_ ) def __bool__( self ) -> Union[str, Any]: return False lowerCamelCase : Dict = _DeletedItem() class __lowercase (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , A = 8 , A = 0.75 ) -> str: snake_case : Any = initial_block_size snake_case : Tuple = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case : Union[str, Any] = capacity_factor snake_case : Optional[Any] = 0 def UpperCAmelCase ( self , A ) -> Optional[int]: return hash(snake_case_ ) % len(self._buckets ) def UpperCAmelCase ( self , A ) -> Optional[Any]: return (ind + 1) % len(self._buckets ) def UpperCAmelCase ( self , A , A , A ) -> int: snake_case : Any = self._buckets[ind] if not stored: snake_case : List[str] = _Item(snake_case_ , snake_case_ ) self._len += 1 return True elif stored.key == key: snake_case : Optional[Any] = _Item(snake_case_ , snake_case_ ) return True else: return False def UpperCAmelCase ( self ) -> Union[str, Any]: snake_case : str = len(self._buckets ) * self._capacity_factor return len(self ) >= int(snake_case_ ) def UpperCAmelCase ( self ) -> int: if len(self._buckets ) <= self._initial_block_size: return False snake_case : List[str] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCAmelCase ( self , A ) -> List[Any]: snake_case : Tuple = self._buckets snake_case : Dict = [None] * new_size snake_case : List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def UpperCAmelCase ( self ) -> Tuple: self._resize(len(self._buckets ) * 2 ) def UpperCAmelCase ( self ) -> str: self._resize(len(self._buckets ) // 2 ) def UpperCAmelCase ( self , A ) -> str: snake_case : Union[str, Any] = self._get_bucket_index(snake_case_ ) for _ in range(len(self._buckets ) ): yield ind snake_case : str = self._get_next_ind(snake_case_ ) def UpperCAmelCase ( self , A , A ) -> Optional[Any]: for ind in self._iterate_buckets(snake_case_ ): if self._try_set(snake_case_ , snake_case_ , snake_case_ ): break def __setitem__( self , A , A ) -> Optional[int]: if self._is_full(): self._size_up() self._add_item(snake_case_ , snake_case_ ) def __delitem__( self , A ) -> Any: for ind in self._iterate_buckets(snake_case_ ): snake_case : List[str] = self._buckets[ind] if item is None: raise KeyError(snake_case_ ) if item is _deleted: continue if item.key == key: snake_case : Union[str, Any] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , A ) -> List[str]: for ind in self._iterate_buckets(snake_case_ ): snake_case : Tuple = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(snake_case_ ) def __len__( self ) -> Optional[int]: return self._len def __iter__( self ) -> List[str]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> Tuple: snake_case : int = """ ,""".join( f"""{item.key}: {item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
124
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class A_ ( unittest.TestCase ): def lowercase ( self : int ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) _UpperCAmelCase = BlipProcessor(snake_case_ , snake_case_ ) processor.save_pretrained(self.tmpdirname ) def lowercase ( self : Tuple , **snake_case_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer def lowercase ( self : Dict , **snake_case_ : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor def lowercase ( self : int ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : int ): _UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _UpperCAmelCase = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 ) _UpperCAmelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="np" ) _UpperCAmelCase = processor(images=snake_case_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = processor(text=snake_case_ ) _UpperCAmelCase = tokenizer(snake_case_ , return_token_type_ids=snake_case_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) 
# test if it raises when no input is passed with pytest.raises(snake_case_ ): processor() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(snake_case_ ) _UpperCAmelCase = tokenizer.batch_decode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : str ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
22
0
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging __A : List[Any] = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] __A : int = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() __A : str = logging.get_logger(__name__) __A : Dict = ''' Hello world! cécé herlolip''' __A : Tuple = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int: '''simple docstring''' UpperCAmelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' UpperCAmelCase = dct.pop(__lowercase ) UpperCAmelCase = val def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple: '''simple docstring''' UpperCAmelCase = torch.load(__lowercase , map_location='''cpu''' ) UpperCAmelCase = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase = emb.weight.shape UpperCAmelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) UpperCAmelCase = emb.weight.data return lin_layer @torch.no_grad() def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple: '''simple docstring''' if not os.path.exists(__lowercase ): UpperCAmelCase = torch.hub.load('''pytorch/fairseq''' , __lowercase ).eval() else: UpperCAmelCase = load_xsum_checkpoint(__lowercase ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: UpperCAmelCase = checkpoint_path.replace('''.''' , '''-''' ) UpperCAmelCase = BartConfig.from_pretrained(__lowercase ) UpperCAmelCase = bart.encode(__lowercase ).unsqueeze(0 ) UpperCAmelCase = BartTokenizer.from_pretrained(__lowercase ).encode(__lowercase , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(__lowercase , __lowercase ).all(): raise ValueError( F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" ) if checkpoint_path == "bart.large.mnli": UpperCAmelCase = bart.state_dict() remove_ignore_keys_(__lowercase ) UpperCAmelCase = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) UpperCAmelCase = BartForSequenceClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) UpperCAmelCase = bart.predict('''mnli''' , __lowercase , return_logits=__lowercase ) UpperCAmelCase = model(__lowercase )[0] # logits else: # no 
classification heads to worry about UpperCAmelCase = bart.model.state_dict() remove_ignore_keys_(__lowercase ) UpperCAmelCase = state_dict['''decoder.embed_tokens.weight'''] UpperCAmelCase = bart.extract_features(__lowercase ) if hf_checkpoint_name == "facebook/bart-large": UpperCAmelCase = BartModel(__lowercase ).eval() model.load_state_dict(__lowercase ) UpperCAmelCase = model(__lowercase ).model[0] else: UpperCAmelCase = BartForConditionalGeneration(__lowercase ).eval() # an existing summarization ckpt model.model.load_state_dict(__lowercase ) if hasattr(__lowercase , '''lm_head''' ): UpperCAmelCase = make_linear_from_emb(model.model.shared ) UpperCAmelCase = model.model(__lowercase )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": __A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) __A : Union[str, Any] = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
273
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase_ ( __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = image.size _UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = torch.from_numpy(__lowercase ) return 2.0 * image - 1.0 class A_ ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] else: raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' ) if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = preprocess(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _UpperCAmelCase = next(self.unet.parameters() ).dtype _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ ) _UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case_ , device=self.device ) _UpperCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for t in self.progress_bar(snake_case_ ): # concat latents and low resolution image in the channel dimension. 
_UpperCAmelCase = torch.cat([latents, image] , dim=1 ) _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample # decode the image latents with the VQVAE _UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample _UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
22
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
312
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
22
0
import string
from math import log10


def term_frequency(term, document):
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term, corpus):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df, n, smoothing=False):
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf, idf):
    return round(tf * idf, 3)
275
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
22
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE ={ '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig'''] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =['''RemBertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =['''RemBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ '''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RemBertForCausalLM''', '''RemBertForMaskedLM''', '''RemBertForMultipleChoice''', '''RemBertForQuestionAnswering''', '''RemBertForSequenceClassification''', '''RemBertForTokenClassification''', '''RemBertLayer''', '''RemBertModel''', '''RemBertPreTrainedModel''', '''load_tf_weights_in_rembert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ '''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRemBertForCausalLM''', '''TFRemBertForMaskedLM''', '''TFRemBertForMultipleChoice''', '''TFRemBertForQuestionAnswering''', '''TFRemBertForSequenceClassification''', '''TFRemBertForTokenClassification''', '''TFRemBertLayer''', '''TFRemBertModel''', '''TFRemBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
213
'''simple docstring'''


def count_set_bits(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
0
'''simple docstring'''


def dodecahedron_surface_area(edge):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
211
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
22
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[Any]: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : Any = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str: """simple docstring""" __lowerCAmelCase : Tuple = self.dummy_uncond_unet __lowerCAmelCase : str = PNDMScheduler() __lowerCAmelCase : List[str] = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_) pndm.to(snake_case_) pndm.set_progress_bar_config(disable=snake_case_) __lowerCAmelCase : int = torch.manual_seed(0) __lowerCAmelCase : Optional[int] = pndm(generator=snake_case_ , num_inference_steps=20 , output_type="numpy").images __lowerCAmelCase : Union[str, Any] = torch.manual_seed(0) __lowerCAmelCase : Optional[Any] = pndm(generator=snake_case_ , num_inference_steps=20 , output_type="numpy" , return_dict=snake_case_)[0] __lowerCAmelCase : Dict = image[0, -3:, -3:, -1] __lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Union[str, Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]: """simple docstring""" __lowerCAmelCase : int = "google/ddpm-cifar10-32" __lowerCAmelCase : str = UNetaDModel.from_pretrained(snake_case_) __lowerCAmelCase : Tuple = PNDMScheduler() __lowerCAmelCase : Tuple = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_) pndm.to(snake_case_) pndm.set_progress_bar_config(disable=snake_case_) __lowerCAmelCase : int = torch.manual_seed(0) __lowerCAmelCase : int = pndm(generator=snake_case_ , output_type="numpy").images __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : List[str] = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
269
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
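The first parametrize block encodes a balanced contiguous split: with 10 shards and 3 jobs, the leftover shard goes to the earliest job. A minimal re-implementation sketch that reproduces those expected values (not necessarily the library's actual algorithm):

def distribute_shards(num_shards, max_num_jobs):
    # At most one job per shard; each job gets a contiguous, near-equal slice.
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    q, r = divmod(num_shards, num_jobs)
    sizes = [q + 1 if i < r else q for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(s, s + size) for s, size in zip(starts, sizes)]

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]
assert distribute_shards(0, 1) == []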
22
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCAmelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCAmelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def UpperCAmelCase_ ( ): lowercase = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowercase = bs[:] lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 lowercase = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = set() lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase = char return pairs class A_ ( lowerCAmelCase_ ): '''simple docstring''' _UpperCamelCase : List[Any] = VOCAB_FILES_NAMES _UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , snake_case , snake_case , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , **snake_case , ): lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token super().__init__( errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , ) with open(snake_case_ , encoding='utf-8' ) as vocab_handle: lowercase = json.load(snake_case_ ) lowercase = {v: k for k, v in self.encoder.items()} lowercase = errors # how to handle errors in decoding lowercase = bytes_to_unicode() lowercase = {v: k for k, v in self.byte_encoder.items()} with open(snake_case_ , encoding='utf-8' ) as merges_handle: lowercase = merges_handle.read().split('\n' )[1:-1] lowercase = [tuple(merge.split() ) for merge in bpe_merges] lowercase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) lowercase = {} lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def SCREAMING_SNAKE_CASE__ ( self ): return len(self.encoder ) def SCREAMING_SNAKE_CASE__ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if token in self.cache: return self.cache[token] lowercase = tuple(snake_case_ ) lowercase = get_pairs(snake_case_ ) if not pairs: return token while True: lowercase = min(snake_case_ , key=lambda snake_case : self.bpe_ranks.get(snake_case_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowercase , lowercase = bigram lowercase = [] lowercase = 0 while i < len(snake_case_ ): try: lowercase = word.index(snake_case_ , snake_case_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase = j if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase = tuple(snake_case_ ) lowercase = new_word if len(snake_case_ ) == 1: break else: lowercase = get_pairs(snake_case_ ) lowercase = ' '.join(snake_case_ ) lowercase = word return word def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = [] for token in re.findall(self.pat , snake_case_ ): lowercase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(' ' ) ) return bpe_tokens def SCREAMING_SNAKE_CASE__ ( self , snake_case ): return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): return self.decoder.get(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = ''.join(snake_case_ ) lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): if not os.path.isdir(snake_case_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase = os.path.join( snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowercase = os.path.join( snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] 
) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '\n' ) lowercase = 0 with open(snake_case_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) lowercase = token_index writer.write(' '.join(snake_case_ ) + '\n' ) index += 1 return vocab_file, merge_file def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ ) if token_ids_a is None: return [1] + ([0] * len(snake_case_ )) + [1] return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1] def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=False , **snake_case ): lowercase = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()): lowercase = ' ' + text return (text, kwargs)
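The `bytes_to_unicode` helper at the top of this tokenizer references `bs`, `cs`, and `n` that are never assigned (every binding was renamed to `lowercase`). The standard GPT-2-style byte-level mapping it implements, reconstructed as a standalone sketch:

def bytes_to_unicode():
    # Printable bytes keep their own codepoint; the rest are remapped above
    # U+0100 so byte-level BPE never emits raw control or whitespace bytes.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, (chr(c) for c in cs)))

byte_encoder = bytes_to_unicode()
assert byte_encoder[ord("A")] == "A" and byte_encoder[0] == chr(0x100)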
195
import math


def perfect_square(num: int) -> bool:
    """Float-based check; fast, but float rounding can mislead for huge inputs."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Exact check: binary-search for an integer square root of `n`."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
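Only the binary-search variant stays exact once values exceed float precision; for example (arbitrary large inputs):

assert perfect_square_binary_search(10**30)        # (10**15) ** 2
assert not perfect_square_binary_search(10**30 + 1)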
22
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING a = logging.get_logger(__name__) a = Dict[str, Any] a = List[Prediction] @add_end_docstrings(lowerCAmelCase_ ) class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ ): def __init__( self : List[str] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any] ): super().__init__(*snake_case_ , **snake_case_ ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def __lowercase ( self : List[Any] , **lowerCAmelCase : Optional[Any] ): lowerCAmelCase = {} if "threshold" in kwargs: lowerCAmelCase = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int] ): return super().__call__(*snake_case_ , **snake_case_ ) def __lowercase ( self : Any , lowerCAmelCase : Dict ): lowerCAmelCase = load_image(snake_case_ ) lowerCAmelCase = torch.IntTensor([[image.height, image.width]] ) lowerCAmelCase = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: lowerCAmelCase = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) lowerCAmelCase = target_size return inputs def __lowercase ( self : Optional[int] , lowerCAmelCase : str ): lowerCAmelCase = model_inputs.pop("""target_size""" ) lowerCAmelCase = self.model(**snake_case_ ) lowerCAmelCase = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: lowerCAmelCase = model_inputs["""bbox"""] return model_outputs def __lowercase ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str=0.9 ): lowerCAmelCase = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
lowerCAmelCase , lowerCAmelCase = target_size[0].tolist() def unnormalize(lowerCAmelCase : List[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) lowerCAmelCase , lowerCAmelCase = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) lowerCAmelCase = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] lowerCAmelCase = [unnormalize(snake_case_ ) for bbox in model_outputs["""bbox"""].squeeze(0 )] lowerCAmelCase = ["""score""", """label""", """box"""] lowerCAmelCase = [dict(zip(snake_case_ , snake_case_ ) ) for vals in zip(scores.tolist() , snake_case_ , snake_case_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel lowerCAmelCase = self.image_processor.post_process_object_detection(snake_case_ , snake_case_ , snake_case_ ) lowerCAmelCase = raw_annotations[0] lowerCAmelCase = raw_annotation["""scores"""] lowerCAmelCase = raw_annotation["""labels"""] lowerCAmelCase = raw_annotation["""boxes"""] lowerCAmelCase = scores.tolist() lowerCAmelCase = [self.model.config.idalabel[label.item()] for label in labels] lowerCAmelCase = [self._get_bounding_box(snake_case_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] lowerCAmelCase = ["""score""", """label""", """box"""] lowerCAmelCase = [ dict(zip(snake_case_ , snake_case_ ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def __lowercase ( self : Optional[int] , lowerCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = box.int().tolist() lowerCAmelCase = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
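In the LayoutLM branch above, boxes come out of the model normalized to a 0-1000 grid; the unnormalize step in isolation (a hypothetical standalone helper, no torch needed):

def unnormalize_box(bbox, width, height):
    # Map an (xmin, ymin, xmax, ymax) box from the 0-1000 grid back to pixels.
    return (
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    )

assert unnormalize_box((0, 0, 500, 1000), width=800, height=600) == (0.0, 0.0, 400.0, 600.0)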
155
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __SCREAMING_SNAKE_CASE :Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ): _UpperCAmelCase = d_model _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length _UpperCAmelCase = cardinality _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence _UpperCAmelCase = embedding_dimension _UpperCAmelCase = is_training _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = context_length _UpperCAmelCase = prediction_length + label_length _UpperCAmelCase = label_length _UpperCAmelCase = moving_average _UpperCAmelCase = autocorrelation_factor def lowercase ( self : Union[str, Any] ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase ( self : int , snake_case_ : Optional[Any] ): _UpperCAmelCase = config.context_length + max(config.lags_sequence ) _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) _UpperCAmelCase = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def lowercase ( self : List[Any] ): _UpperCAmelCase = self.get_config() _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ ) return config, inputs_dict def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ): _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval() _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = outputs.encoder_last_hidden_state _UpperCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_encoder() encoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _UpperCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _UpperCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _UpperCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _UpperCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _UpperCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_decoder() decoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else () _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Tuple = False _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[Any] = False def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, 
Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ ) @unittest.skip(reason="Model has no tokens embeddings" ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : Optional[int] ): _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) ) # The main input is the name of the argument after `self` _UpperCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ ) _UpperCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_ ) # decoder attentions _UpperCAmelCase = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _UpperCAmelCase = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 2 , len(snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase ( self : Dict ): super().test_retain_grad_hidden_states_attentions() def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" ) _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) return batch @require_torch @slow class A_ ( unittest.TestCase ): def lowercase ( self : Optional[int] ): _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch() with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] _UpperCAmelCase = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , 
static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ ) _UpperCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
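The tests above lean on Autoformer's series decomposition: a moving average extracts the trend and the residual is treated as the seasonal component. A hedged sketch of that idea (edge-padded moving average; an illustration, not the model's exact layer):

import torch

def decompose(series, kernel=25):
    # series: (batch, time, dim). Replicate the edges so the average keeps length `time`.
    pad_left = (kernel - 1) // 2
    pad_right = kernel - 1 - pad_left
    padded = torch.cat(
        [series[:, :1].repeat(1, pad_left, 1), series, series[:, -1:].repeat(1, pad_right, 1)],
        dim=1,
    )
    trend = padded.unfold(1, kernel, 1).mean(dim=-1)
    return series - trend, trend  # (seasonal, trend)

seasonal, trend = decompose(torch.randn(2, 100, 3))
assert seasonal.shape == trend.shape == (2, 100, 3)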
22
0
"""simple docstring""" import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] ): '''simple docstring''' lowercase = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, oder?', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase = { 'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'], 'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'], 'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'], 'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'], } lowercase = f'{src_lang}-{tgt_lang}' lowercase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(__lowercase , exist_ok=__lowercase ) lowercase = os.path.join(__lowercase , 'README.md' ) print(f'Generating {path}' ) with open(__lowercase , 'w' , encoding='utf-8' ) as f: f.write(__lowercase ) # make sure we are under the root of the project _UpperCamelCase : Optional[Any] = Path(__file__).resolve().parent.parent.parent _UpperCamelCase : Any = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: _UpperCamelCase : Optional[Any] = model_name.split('-') _UpperCamelCase : List[str] = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
220
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class A_ : _lowerCamelCase : str _lowerCamelCase : str = None @staticmethod def lowercase ( ): raise NotImplementedError def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ): raise NotImplementedError def lowercase ( self : Any , snake_case_ : int ): raise NotImplementedError def lowercase ( self : List[str] ): if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase ( cls : List[Any] ): return f'`pip install {cls.pip_package or cls.name}`' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """optuna""" @staticmethod def lowercase ( ): return is_optuna_available() def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ): return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : int , snake_case_ : Optional[int] ): return default_hp_space_optuna(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """ray""" _lowerCamelCase : Tuple = """'ray[tune]'""" @staticmethod def lowercase ( ): return is_ray_available() def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ): return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : str ): return default_hp_space_ray(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """sigopt""" @staticmethod def lowercase ( ): return is_sigopt_available() def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ): return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] ): return default_hp_space_sigopt(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """wandb""" @staticmethod def lowercase ( ): return is_wandb_available() def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ): return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : Union[str, Any] ): return default_hp_space_wandb(snake_case_ ) __SCREAMING_SNAKE_CASE :Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase_ ( ) -> str: '''simple docstring''' _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowercase ) > 0: _UpperCAmelCase = available_backends[0].name if len(__lowercase ) > 1: logger.info( f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
22
0
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase : def __init__( self : int , __lowercase : str , __lowercase : Any=13 , __lowercase : Dict=7 , __lowercase : str=True , __lowercase : Optional[int]=True , __lowercase : Union[str, Any]=True , __lowercase : Union[str, Any]=True , __lowercase : Union[str, Any]=99 , __lowercase : List[str]=24 , __lowercase : Dict=2 , __lowercase : int=6 , __lowercase : Optional[int]=37 , __lowercase : Any="gelu" , __lowercase : str=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Union[str, Any]=512 , __lowercase : Union[str, Any]=16 , __lowercase : List[str]=2 , __lowercase : str=0.0_2 , __lowercase : int=3 , __lowercase : List[str]=None , __lowercase : Optional[Any]=1000 , ): """simple docstring""" __lowercase =parent __lowercase =batch_size __lowercase =seq_length __lowercase =is_training __lowercase =use_input_mask __lowercase =use_token_type_ids __lowercase =use_labels __lowercase =vocab_size __lowercase =hidden_size __lowercase =num_hidden_layers __lowercase =num_attention_heads __lowercase =intermediate_size __lowercase =hidden_act __lowercase =hidden_dropout_prob __lowercase =attention_probs_dropout_prob __lowercase =max_position_embeddings __lowercase =type_vocab_size __lowercase =type_sequence_label_size __lowercase =initializer_range __lowercase =num_labels __lowercase =scope __lowercase =range_bbox def snake_case ( self : Dict ): """simple docstring""" __lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowercase =bbox[i, j, 3] __lowercase =bbox[i, j, 1] __lowercase =t if bbox[i, j, 2] < bbox[i, j, 0]: __lowercase =bbox[i, j, 2] __lowercase =bbox[i, j, 0] __lowercase =t __lowercase =None if self.use_input_mask: __lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowercase =None if self.use_token_type_ids: __lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase =None __lowercase =None if self.use_labels: __lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase =self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def snake_case ( self : int ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def snake_case ( self : Optional[int] , __lowercase : Any , __lowercase : Dict , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Dict , ): """simple docstring""" __lowercase =LiltModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowercase =model(snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ ) __lowercase =model(snake_case_ , bbox=snake_case_ , token_type_ids=snake_case_ ) __lowercase =model(snake_case_ , bbox=snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self : int , __lowercase : List[Any] , __lowercase : int , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : List[str] , ): """simple docstring""" __lowercase =self.num_labels __lowercase =LiltForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowercase =model( snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self : List[Any] , __lowercase : Tuple , __lowercase : str , __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : str , ): """simple docstring""" __lowercase =LiltForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowercase =model( snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self : int ): """simple docstring""" __lowercase =self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) =config_and_inputs __lowercase ={ 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): lowerCAmelCase_ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase_ = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def snake_case ( self : List[Any] , __lowercase : str , __lowercase : str , __lowercase : Dict , __lowercase : Any , __lowercase : Any ): """simple docstring""" return True def snake_case ( self : Tuple ): """simple docstring""" __lowercase =LiltModelTester(self ) __lowercase =ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def snake_case ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() 
def snake_case ( self : Any ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def snake_case ( self : Union[str, Any] ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase =type self.model_tester.create_and_check_model(*snake_case_ ) def snake_case ( self : Union[str, Any] ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case_ ) def snake_case ( self : Tuple ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case_ ) @slow def snake_case ( self : Tuple ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase =LiltModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_torch @slow class lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Tuple ): """simple docstring""" __lowercase =LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(snake_case_ ) __lowercase =torch.tensor([[1, 2]] , device=snake_case_ ) __lowercase =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case_ ) # forward pass with torch.no_grad(): __lowercase =model(input_ids=snake_case_ , bbox=snake_case_ ) __lowercase =torch.Size([1, 2, 768] ) __lowercase =torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=snake_case_ , ) self.assertTrue(outputs.last_hidden_state.shape , snake_case_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case_ , atol=1E-3 ) )
141
'''simple docstring''' __SCREAMING_SNAKE_CASE :List[str] = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, 
KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
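A minimal usage sketch for the package initialized above: once the optional torch and transformers backends are installed, the guarded imports expose the pipeline classes at the top level. The checkpoint id and CUDA device below are illustrative assumptions, not taken from this file.

# Assumes `pip install diffusers torch transformers` and a CUDA GPU; the model id is an example.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")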
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)

    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
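A quick sanity check of the command defined above, using the function names as restored here. It assumes `accelerate config` has already been run so a default config file exists; `--debug` prints the assembled gcloud invocation instead of executing it.

# Exercise the parser and launcher above without touching gcloud.
parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-b", "--command", "echo hello", "--debug"]
)
tpu_command_launcher(args)  # prints: Running gcloud compute tpus tpu-vm ssh my-tpu ...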
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a decoded string into one sentence per line (as expected by ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
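A short usage sketch for the splitter above (function name as restored here):

text = "Pegasus is mighty. <n>It summarizes long documents well."
print(add_newline_to_end_of_each_sentence(text))
# Pegasus is mighty.
# It summarizes long documents well.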
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __A : List[Any] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class A_ (unittest.TestCase ): UpperCAmelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: UpperCAmelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: UpperCAmelCase__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase ( self , _A , _A , _A ): '''simple docstring''' UpperCAmelCase = ZeroShotClassificationPipeline( model=snake_case_ , tokenizer=snake_case_ , candidate_labels=['''polics''', '''health'''] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase ( self , _A , _A ): '''simple docstring''' UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' ) self.assertEqual(snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ )], '''scores''': [ANY(snake_case_ )]} ) # No kwarg UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , ['''politics'''] ) self.assertEqual(snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ )], '''scores''': [ANY(snake_case_ )]} ) UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] ) self.assertEqual(snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ )], '''scores''': [ANY(snake_case_ )]} ) UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' ) self.assertEqual( snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ ), ANY(snake_case_ )], '''scores''': [ANY(snake_case_ ), ANY(snake_case_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] ) self.assertEqual( snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ ), ANY(snake_case_ )], '''scores''': [ANY(snake_case_ ), ANY(snake_case_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 ) UpperCAmelCase = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' ) self.assertEqual(snake_case_ , {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ )], '''scores''': [ANY(snake_case_ )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase = classifier(['''I am happy'''] , ['''positive''', '''negative'''] ) self.assertEqual( snake_case_ , [ {'''sequence''': ANY(snake_case_ ), '''labels''': [ANY(snake_case_ ), ANY(snake_case_ )], '''scores''': [ANY(snake_case_ ), ANY(snake_case_ )]} for i in range(1 ) ] , ) UpperCAmelCase = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] ) self.assertEqual( snake_case_ , [ {'''sequence''': 
ANY(snake_case_ ), '''labels''': [ANY(snake_case_ ), ANY(snake_case_ )], '''scores''': [ANY(snake_case_ ), ANY(snake_case_ )]} for i in range(2 ) ] , ) with self.assertRaises(snake_case_ ): classifier('''''' , candidate_labels='''politics''' ) with self.assertRaises(snake_case_ ): classifier(snake_case_ , candidate_labels='''politics''' ) with self.assertRaises(snake_case_ ): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' ) with self.assertRaises(snake_case_ ): classifier('''Who are you voting for in 2020?''' , candidate_labels=snake_case_ ) with self.assertRaises(snake_case_ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(snake_case_ ): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=snake_case_ , ) self.run_entailment_id(snake_case_ ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = zero_shot_classifier.model.config UpperCAmelCase = config.labelaid UpperCAmelCase = zero_shot_classifier.entailment_id UpperCAmelCase = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase = original_labelaid self.assertEqual(snake_case_ , zero_shot_classifier.entailment_id ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] ) @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) UpperCAmelCase = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @require_tf def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) UpperCAmelCase = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @slow @require_torch def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' ) UpperCAmelCase = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) UpperCAmelCase = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=snake_case_ , ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. 
The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , ) @slow @require_tf def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' ) UpperCAmelCase = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) UpperCAmelCase = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=snake_case_ , ) self.assertEqual( nested_simplify(snake_case_ ) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. 
We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , )
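The behavior exercised by these tests can be reproduced directly; the sketch below mirrors the slow test above, with the same checkpoint and candidate labels.

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"])  # ['politics', 'public health', 'science']
print(result["scores"])  # roughly [0.976, 0.015, 0.009] per the expected values above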
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A_ : def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : List[Any] ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) 
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) comm_check_on_output(snake_case_ ) _UpperCAmelCase = model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowerCamelCase : Tuple = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = False def lowercase ( self : Optional[int] ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def lowercase ( self : Any ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def lowercase ( self : List[str] ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def lowercase ( self : 
List[str] ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def lowercase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowercase ( self : Any ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def lowercase ( self : Optional[int] ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ), "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ), "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def lowercase ( self : int ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE :Dict = 1e-4 def UpperCAmelCase_ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class A_ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def lowercase ( self : List[Any] ): _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( 
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : int ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : List[Any] ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , ) _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ ) _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]] _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]] with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
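A standalone inference sketch matching the integration tests above (same checkpoint; the image path is the COCO fixture the tests use, so adjust it to any local image):

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.masks_queries_logits.shape)  # (1, num_queries, H // 4, W // 4)
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)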
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        # Black out any image flagged as NSFW.
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        # Black out any image flagged as watermarked.
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
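A hedged smoke test for the checker above: it builds a default CLIPConfig with randomly initialized weights, so the flags it prints are arbitrary. The import path is an assumption about where this module lives inside diffusers; adjust it if the file sits elsewhere.

import numpy as np
import torch
from transformers import CLIPConfig

# Assumed module path for the class defined above.
from diffusers.pipelines.deepfloyd_if.safety_checker import IFSafetyChecker

checker = IFSafetyChecker(CLIPConfig())  # random weights, CPU-friendly
images = [np.random.rand(64, 64, 3) for _ in range(2)]
clip_input = torch.randn(2, 3, 224, 224)  # default CLIP vision input size
images, nsfw, watermark = checker(clip_input, images)
print(nsfw, watermark)  # e.g. [False, True], [True, False] depending on the random weights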
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
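A short usage sketch for the fast tokenizer defined above, loading a published ALBERT checkpoint:

from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello world", "How are you?")
print(encoded["input_ids"])
print(encoded["token_type_ids"])  # 0s for [CLS] + first segment + [SEP], 1s for the second segment + [SEP]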
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase : def __init__( self , A_ , A_=2 , A_=8 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=16 , A_=5 , A_=2 , A_=36 , A_="gelu" , A_=0.0 , A_=0.0 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) ->str: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : List[str] = batch_size __lowerCAmelCase : Any = seq_length __lowerCAmelCase : List[Any] = is_training __lowerCAmelCase : Union[str, Any] = use_input_mask __lowerCAmelCase : List[str] = use_token_type_ids __lowerCAmelCase : str = use_labels __lowerCAmelCase : int = vocab_size __lowerCAmelCase : str = hidden_size __lowerCAmelCase : Tuple = num_hidden_layers __lowerCAmelCase : int = num_attention_heads __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : Dict = hidden_act __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : Dict = type_vocab_size __lowerCAmelCase : Dict = type_sequence_label_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : Optional[Any] = num_labels __lowerCAmelCase : List[str] = num_choices __lowerCAmelCase : Optional[Any] = scope def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Union[str, Any] = None if self.use_input_mask: __lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Tuple = None if self.use_token_type_ids: __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Tuple = None __lowerCAmelCase : Tuple = None __lowerCAmelCase : str = None if self.use_labels: __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.get_config() __lowerCAmelCase : Tuple = 300 
return config def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : Optional[Any] = self.prepare_config_and_inputs() __lowerCAmelCase : str = True __lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = MraModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : List[Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ ) __lowerCAmelCase : Optional[Any] = model(snake_case_ , token_type_ids=snake_case_ ) __lowerCAmelCase : str = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : str = True __lowerCAmelCase : Union[str, Any] = MraModel(snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : Optional[int] = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , ) __lowerCAmelCase : Optional[Any] = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , ) __lowerCAmelCase : Dict = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : str = MraForMaskedLM(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : List[Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = MraForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : Optional[Any] = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.num_labels __lowerCAmelCase : int = MraForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) 
->List[str]: '''simple docstring''' __lowerCAmelCase : List[Any] = self.num_labels __lowerCAmelCase : Any = MraForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : Dict = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int: '''simple docstring''' __lowerCAmelCase : str = self.num_choices __lowerCAmelCase : Optional[Any] = MraForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() __lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : Optional[int] = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : Optional[Any] = config_and_inputs __lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase (lowerCAmelCase_ , unittest.TestCase ): _UpperCamelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = () def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = MraModelTester(self ) __lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase : Any = type self.model_tester.create_and_check_model(*snake_case_ ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case_ ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case_ ) @slow def UpperCamelCase__ ( self ) ->str: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Any = MraModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @unittest.skip(reason='''MRA does not output attentions''' ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' return @require_torch class __lowercase (unittest.TestCase ): @slow def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Tuple = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCAmelCase : Tuple = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase : Optional[int] = model(snake_case_ )[0] __lowerCAmelCase : str = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , snake_case_ ) __lowerCAmelCase : str = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) ) @slow def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Dict = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __lowerCAmelCase : Any = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase : Tuple = model(snake_case_ )[0] __lowerCAmelCase : Any = 5_0265 __lowerCAmelCase : int = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , snake_case_ ) __lowerCAmelCase : Union[str, Any] = torch.tensor( [[[9.2_595, -3.6_038, 11.8819], [9.3_869, -3.2_693, 11.0956], [11.8524, -3.4_938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) ) @slow def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Tuple = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __lowerCAmelCase : str = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase : Dict = model(snake_case_ )[0] __lowerCAmelCase : List[str] = 5_0265 __lowerCAmelCase : Optional[Any] = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , snake_case_ ) __lowerCAmelCase : Optional[Any] = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1e-4 ) )
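The slow tests above translate directly into a usage sketch with the same checkpoint and input shape:

import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)  # batch of one 256-token sequence
with torch.no_grad():
    hidden = model(input_ids)[0]
print(hidden.shape)  # torch.Size([1, 256, 768])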
275
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :int = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """perceiver""" def __init__( self : Any , snake_case_ : List[Any]=2_5_6 , snake_case_ : str=1_2_8_0 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : int=1 , snake_case_ : List[Any]=2_6 , snake_case_ : Dict=8 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Tuple=None , snake_case_ : Any="kv" , snake_case_ : Any=1 , snake_case_ : List[str]=1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Dict=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : List[str]=True , snake_case_ : str=2_6_2 , snake_case_ : Optional[Any]=2_0_4_8 , snake_case_ : Union[str, Any]=5_6 , snake_case_ : Dict=[3_6_8, 4_9_6] , snake_case_ : Tuple=1_6 , snake_case_ : Union[str, Any]=1_9_2_0 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) _UpperCAmelCase = num_latents _UpperCAmelCase = d_latents _UpperCAmelCase = d_model _UpperCAmelCase = num_blocks _UpperCAmelCase = num_self_attends_per_block _UpperCAmelCase = num_self_attention_heads _UpperCAmelCase = num_cross_attention_heads _UpperCAmelCase = qk_channels _UpperCAmelCase = v_channels _UpperCAmelCase = cross_attention_shape_for_attention _UpperCAmelCase = self_attention_widening_factor _UpperCAmelCase = cross_attention_widening_factor _UpperCAmelCase = hidden_act _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_query_residual # masked language modeling attributes _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings # image classification attributes _UpperCAmelCase = image_size # flow attributes _UpperCAmelCase = train_size # multimodal autoencoding attributes _UpperCAmelCase = num_frames _UpperCAmelCase = audio_samples_per_frame _UpperCAmelCase = samples_per_patch _UpperCAmelCase = output_shape class A_ ( lowerCAmelCase_ ): @property def lowercase ( self : int ): if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def lowercase ( self : Optional[Any] ): return 1e-4 def lowercase ( self : List[str] , snake_case_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , snake_case_ : int = 3 , snake_case_ : int = 4_0 , snake_case_ : int = 4_0 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(snake_case_ , snake_case_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples 
to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = preprocessor.num_special_tokens_to_add(snake_case_ ) _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [" ".join(["a"] ) * seq_length] * batch_size _UpperCAmelCase = dict(preprocessor(snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("input_ids" ) return inputs elif isinstance(snake_case_ , snake_case_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension(snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch ) _UpperCAmelCase = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _UpperCAmelCase = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
22
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int ): if not isinstance(__lowercase , __lowercase ): lowercase_ : Any = F'''Input value of [number={number}] must be an integer''' raise TypeError(__lowercase ) if number < 0: return False lowercase_ : Optional[Any] = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
213
'''simple docstring''' import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef __SCREAMING_SNAKE_CASE :List[str] = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Tuple ) -> int: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) return (preds == labels).mean() def UpperCAmelCase_ ( __lowercase : int , __lowercase : str ) -> Optional[Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) _UpperCAmelCase = simple_accuracy(__lowercase , __lowercase ) _UpperCAmelCase = fa_score(y_true=__lowercase , y_pred=__lowercase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : List[str] ) -> List[Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) _UpperCAmelCase = pearsonr(__lowercase , __lowercase )[0] _UpperCAmelCase = spearmanr(__lowercase , __lowercase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : str ) -> Tuple: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) assert len(__lowercase ) == len(__lowercase ), f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}' if task_name == "cola": return {"mcc": matthews_corrcoef(__lowercase , __lowercase )} elif task_name == "sst-2": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "mrpc": return acc_and_fa(__lowercase , __lowercase ) elif task_name == "sts-b": return pearson_and_spearman(__lowercase , __lowercase ) elif task_name == "qqp": return acc_and_fa(__lowercase , __lowercase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "qnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "rte": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "wnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} elif task_name == "hans": return {"acc": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError(__lowercase ) def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Dict , __lowercase : str ) -> Union[str, Any]: '''simple docstring''' warnings.warn(__lowercase , __lowercase ) requires_backends(__lowercase , "sklearn" ) if len(__lowercase ) != len(__lowercase ): raise ValueError(f'Predictions and labels have mismatched lengths {len(__lowercase )} and {len(__lowercase )}' ) if task_name == "xnli": return {"acc": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError(__lowercase )
22
0
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowercase_ = 50_000 lowercase_ = 5_000 lowercase_ = os.path.split(__file__) lowercase_ = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def lowerCAmelCase (__A , __A): """simple docstring""" for i in range(__lowercase): _a = dataset[i] @get_duration def lowerCAmelCase (__A , __A , __A): """simple docstring""" for i in range(0 , len(__lowercase) , __lowercase): _a = dataset[i : i + batch_size] @get_duration def lowerCAmelCase (__A , __A , __A): """simple docstring""" with dataset.formatted_as(type=__lowercase): for i in range(__lowercase): _a = dataset[i] @get_duration def lowerCAmelCase (__A , __A , __A , __A): """simple docstring""" with dataset.formatted_as(type=__lowercase): for i in range(0 , __lowercase , __lowercase): _a = dataset[i : i + batch_size] def lowerCAmelCase (): """simple docstring""" _a = {'''num examples''': SPEED_TEST_N_EXAMPLES} _a = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}), ] _a = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('''generating dataset''') _a = datasets.Features( {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')}) _a = generate_example_dataset( os.path.join(__lowercase , '''dataset.arrow''') , __lowercase , num_examples=__lowercase , seq_shapes={'''list''': (100,)} , ) print('''first set of iterations''') for func, kwargs in functions: print(func.__name__ , str(__lowercase)) _a = func(__lowercase , **__lowercase) print('''shuffling dataset''') _a = dataset.shuffle() print('''Second set of iterations (after shuffling''') for func, kwargs in functions_shuffled: print('''shuffled ''' , func.__name__ , str(__lowercase)) _a = func( __lowercase , **__lowercase) with open(__lowercase , '''wb''') as f: f.write(json.dumps(__lowercase).encode('''utf-8''')) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
211
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase = TapasConfig.from_json_file(__lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_4694 _UpperCAmelCase = 0.20_7951 _UpperCAmelCase = 0.12_1194 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.035_2513 _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.4519 _UpperCAmelCase = 0.90_3421 _UpperCAmelCase = 222.088 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_3141 _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=__lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=__lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=__lowercase ) else: raise ValueError(f'Task {task} not supported.' ) print(f'Building PyTorch model from configuration: {config}' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase ) # Save pytorch-model (weights and configuration) print(f'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__lowercase ) # Save tokenizer files print(f'Save tokenizer files to {pytorch_dump_path}' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(__lowercase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __SCREAMING_SNAKE_CASE :List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
22
0
"""simple docstring""" def _lowercase ( __snake_case ) -> str: if isinstance(__lowercase ,__lowercase ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(__lowercase ,__lowercase ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" __lowerCAmelCase : Optional[Any] = False if num < 0: __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : Union[str, Any] = -num __lowerCAmelCase : str = [] while num > 0: binary.insert(0 ,num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(__lowercase ) for e in binary ) return "0b" + "".join(str(__lowercase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
269
'''simple docstring'''

import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
22
0
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase_ ) class A_ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case_ , **snake_case_ ) requires_backends(self , 'decord' ) self.check_model_type(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None , snake_case=None ): lowercase = {} if frame_sampling_rate is not None: lowercase = frame_sampling_rate if num_frames is not None: lowercase = num_frames lowercase = {} if top_k is not None: lowercase = top_k return preprocess_params, {}, postprocess_params def __call__( self , snake_case , **snake_case ): return super().__call__(snake_case_ , **snake_case_ ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=1 ): if num_frames is None: lowercase = self.model.config.num_frames if video.startswith('http://' ) or video.startswith('https://' ): lowercase = BytesIO(requests.get(snake_case_ ).content ) lowercase = VideoReader(snake_case_ ) videoreader.seek(0 ) lowercase = 0 lowercase = num_frames * frame_sampling_rate - 1 lowercase = np.linspace(snake_case_ , snake_case_ , num=snake_case_ , dtype=np.intaa ) lowercase = videoreader.get_batch(snake_case_ ).asnumpy() lowercase = list(snake_case_ ) lowercase = self.image_processor(snake_case_ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = self.model(**snake_case_ ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=5 ): if top_k > self.model.config.num_labels: lowercase = self.model.config.num_labels if self.framework == "pt": lowercase = model_outputs.logits.softmax(-1 )[0] lowercase , lowercase = probs.topk(snake_case_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowercase = scores.tolist() lowercase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
195
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files" , [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ] , ) def UpperCAmelCase_ ( __lowercase : Any , __lowercase : int ) -> int: '''simple docstring''' _UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase = DatasetInfosDict.from_directory(__lowercase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info" , [ DatasetInfo(), DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ), ] , ) def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : DatasetInfo ) -> Any: '''simple docstring''' _UpperCAmelCase = str(__lowercase ) dataset_info.write_to_directory(__lowercase ) _UpperCAmelCase = DatasetInfo.from_directory(__lowercase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowercase , "dataset_info.json" ) ) def UpperCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = DatasetInfo( description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCAmelCase = dataset_info._to_yaml_dict() assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCAmelCase = yaml.safe_dump(__lowercase ) _UpperCAmelCase = yaml.safe_load(__lowercase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = DatasetInfo() _UpperCAmelCase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict" , [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase_ ( __lowercase : int , __lowercase : DatasetInfosDict ) -> Dict: '''simple docstring''' _UpperCAmelCase = str(__lowercase ) dataset_infos_dict.write_to_directory(__lowercase ) _UpperCAmelCase = 
DatasetInfosDict.from_directory(__lowercase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowercase , "README.md" ) )
22
0
"""simple docstring""" import functools from typing import Any def lowercase (snake_case__ : str , snake_case__ : list[str] ) -> bool: '''simple docstring''' if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0: raise ValueError("""the string should be not empty string""" ) if not isinstance(__lowercase , __lowercase ) or not all( isinstance(__lowercase , __lowercase ) and len(__lowercase ) > 0 for item in words ): raise ValueError("""the words should be a list of non-empty strings""" ) # Build trie lowerCAmelCase = {} lowerCAmelCase = """WORD_KEEPER""" for word in words: lowerCAmelCase = trie for c in word: if c not in trie_node: lowerCAmelCase = {} lowerCAmelCase = trie_node[c] lowerCAmelCase = True lowerCAmelCase = len(__lowercase ) # Dynamic programming method @functools.cache def is_breakable(snake_case__ : int ) -> bool: if index == len_string: return True lowerCAmelCase = trie for i in range(__lowercase , __lowercase ): lowerCAmelCase = trie_node.get(string[i] , __lowercase ) if trie_node is None: return False if trie_node.get(__lowercase , __lowercase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
155
'''simple docstring'''


def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    # Reverse every word longer than four characters, leave shorter words as-is.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
22
0
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _SCREAMING_SNAKE_CASE ( __snake_case : float , __snake_case : float , __snake_case : int ): '''simple docstring''' lowercase = x lowercase = y for step in range(__lowercase ): # noqa: B007 lowercase = a * a - b * b + x lowercase = 2 * a * b + y lowercase = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _SCREAMING_SNAKE_CASE ( __snake_case : float ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_55, 2_55, 2_55) def _SCREAMING_SNAKE_CASE ( __snake_case : float ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__lowercase , 1 , 1 ) ) def _SCREAMING_SNAKE_CASE ( __snake_case : int = 8_00 , __snake_case : int = 6_00 , __snake_case : float = -0.6 , __snake_case : float = 0 , __snake_case : float = 3.2 , __snake_case : int = 50 , __snake_case : bool = True , ): '''simple docstring''' lowercase = Image.new('RGB' , (image_width, image_height) ) lowercase = img.load() # loop through the image-coordinates for image_x in range(__lowercase ): for image_y in range(__lowercase ): # determine the figure-coordinates based on the image-coordinates lowercase = figure_width / image_width * image_height lowercase = figure_center_x + (image_x / image_width - 0.5) * figure_width lowercase = figure_center_y + (image_y / image_height - 0.5) * figure_height lowercase = get_distance(__lowercase , __lowercase , __lowercase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowercase = get_color_coded_rgb(__lowercase ) else: lowercase = get_black_and_white_rgb(__lowercase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _UpperCamelCase : Any = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
220
'''simple docstring'''


def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
22
0
'''simple docstring'''

from heapq import heappop, heappush

import numpy as np


def __UpperCamelCase(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    '''simple docstring'''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
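# Illustrative usage sketch (an addition, not part of the original sample;
# relies on `np` imported above). In the grid, 1 marks a traversable cell and
# 0 a blocked one; the function returns (shortest distance, path) or (np.inf, []).
demo_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
dist, path = __UpperCamelCase(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist)  # expected 4.0: (0,0) -> (0,1) -> (1,1) -> (2,1) -> (2,2)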
141
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE :int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class A_ ( lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : List[str] = True def lowercase ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Tuple , snake_case_ : Any ): return ("This is a test", "This is a test") def lowercase ( self : Optional[int] ): _UpperCAmelCase = "</s>" _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(snake_case_ ) , 1_1_0_3 ) def lowercase ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def lowercase ( self : List[Any] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Tuple ): _UpperCAmelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions." _UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 _UpperCAmelCase = "To ensure a smooth flow of bank resolutions." 
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] _UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self : int ): _UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. @slow def lowercase ( self : Dict ): # fmt: off _UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class A_ ( 
lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[str] = PegasusTokenizer _lowerCamelCase : List[Any] = PegasusTokenizerFast _lowerCamelCase : int = True _lowerCamelCase : Union[str, Any] = True def lowercase ( self : Any ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = PegasusTokenizer(snake_case_ , offset=0 , mask_token_sent=snake_case_ , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Tuple ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def lowercase ( self : Optional[Any] , **snake_case_ : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowercase ( self : Union[str, Any] , snake_case_ : str ): return ("This is a test", "This is a test") def lowercase ( self : List[str] ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) _UpperCAmelCase = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) _UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] _UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0] self.assertListEqual(snake_case_ , snake_case_ ) @require_torch def lowercase ( self : Tuple ): _UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"] _UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"] _UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) _UpperCAmelCase = self._large_tokenizer( text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(snake_case_ ) == 2 # input_ids, attention_mask. def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) _UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids self.assertListEqual( snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
22
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
124
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class A_ ( unittest.TestCase ): def lowercase ( self : int ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) _UpperCAmelCase = BlipProcessor(snake_case_ , snake_case_ ) processor.save_pretrained(self.tmpdirname ) def lowercase ( self : Tuple , **snake_case_ : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer def lowercase ( self : Dict , **snake_case_ : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor def lowercase ( self : int ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : int ): _UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _UpperCAmelCase = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 ) _UpperCAmelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="np" ) _UpperCAmelCase = processor(images=snake_case_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = processor(text=snake_case_ ) _UpperCAmelCase = tokenizer(snake_case_ , return_token_type_ids=snake_case_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) 
# test if it raises when no input is passed with pytest.raises(snake_case_ ): processor() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(snake_case_ ) _UpperCAmelCase = tokenizer.batch_decode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowercase ( self : str ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _UpperCAmelCase = "lower newer" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
22
0
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Fill the single <mask> token in `masked_input` and return the top-k
    # candidate completions with their probabilities.
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
273
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def UpperCAmelCase_ ( __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = image.size _UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _UpperCAmelCase = np.array(__lowercase ).astype(np.floataa ) / 255.0 _UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 ) _UpperCAmelCase = torch.from_numpy(__lowercase ) return 2.0 * image - 1.0 class A_ ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(snake_case_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] else: raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' ) if isinstance(snake_case_ , PIL.Image.Image ): _UpperCAmelCase = preprocess(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width) _UpperCAmelCase = next(self.unet.parameters() ).dtype _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ ) _UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(snake_case_ , device=self.device ) _UpperCAmelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase = {} if accepts_eta: _UpperCAmelCase = eta for t in self.progress_bar(snake_case_ ): # concat latents and low resolution image in the channel dimension. 
_UpperCAmelCase = torch.cat([latents, image] , dim=1 ) _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ ) # predict the noise residual _UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample # decode the image latents with the VQVAE _UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample _UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
22
0
import argparse import hashlib # hashlib is only used inside the Test class import struct class _a : """simple docstring""" def __init__( self : int , UpperCAmelCase : str ): A_ = data A_ = [0x67_45_23_01, 0xef_cd_ab_89, 0x98_ba_dc_fe, 0x10_32_54_76, 0xc3_d2_e1_f0] @staticmethod def __A ( UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ): return ((n << b) | (n >> (32 - b))) & 0xff_ff_ff_ff def __A ( self : Union[str, Any] ): A_ = B"\x80" + B"\x00" * (63 - (len(self.data ) + 8) % 64) A_ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) ) return padded_data def __A ( self : Optional[int] ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __A ( self : Tuple , UpperCAmelCase : List[str] ): A_ = list(struct.unpack(">16L" , snake_case_ ) ) + [0] * 64 for i in range(16 , 80 ): A_ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __A ( self : List[Any] ): A_ = self.padding() A_ = self.split_blocks() for block in self.blocks: A_ = self.expand_block(snake_case_ ) A_ , A_ , A_ , A_ , A_ = self.h for i in range(0 , 80 ): if 0 <= i < 20: A_ = (b & c) | ((~b) & d) A_ = 0x5a_82_79_99 elif 20 <= i < 40: A_ = b ^ c ^ d A_ = 0x6e_d9_eb_a1 elif 40 <= i < 60: A_ = (b & c) | (b & d) | (c & d) A_ = 0x8f_1b_bc_dc elif 60 <= i < 80: A_ = b ^ c ^ d A_ = 0xca_62_c1_d6 A_ , A_ , A_ , A_ , A_ = ( self.rotate(snake_case_ , 5 ) + f + e + k + expanded_block[i] & 0xff_ff_ff_ff, a, self.rotate(snake_case_ , 30 ), c, d, ) A_ = ( self.h[0] + a & 0xff_ff_ff_ff, self.h[1] + b & 0xff_ff_ff_ff, self.h[2] + c & 0xff_ff_ff_ff, self.h[3] + d & 0xff_ff_ff_ff, self.h[4] + e & 0xff_ff_ff_ff, ) return ("{:08x}" * 5).format(*self.h ) def __snake_case ( ): """simple docstring""" A_ = B"Test String" assert SHAaHash(__lowercase ).final_hash() == hashlib.shaa(__lowercase ).hexdigest() # noqa: S324 def __snake_case ( ): """simple docstring""" A_ = argparse.ArgumentParser(description="Process some strings or files" ) parser.add_argument( "--string" ,dest="input_string" ,default="Hello World!! Welcome to Cryptography" ,help="Hash the string" ,) parser.add_argument("--file" ,dest="input_file" ,help="Hash contents of a file" ) A_ = parser.parse_args() A_ = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file ,"rb" ) as f: A_ = f.read() else: A_ = bytes(__lowercase ,"utf-8" ) print(SHAaHash(__lowercase ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
312
'''simple docstring'''

import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
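# Worked example (an addition, not part of the original sample) chaining the
# four functions above on a tiny three-document corpus.
corpus = "this is the first document\nthis document is the second document\nand this is the third one"
tf = term_frequency("document", "this is the first document")  # 1
df, n = document_frequency("document", corpus)  # (2, 3)
idf = inverse_document_frequency(df, n)  # round(log10(3 / 2), 3) = 0.176
print(tf_idf(tf, idf))  # 0.176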
22
0
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class __lowercase (lowerCAmelCase_ ): def __init__( self , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = path_or_paths __lowerCAmelCase : List[Any] = split if split or isinstance(snake_case_ , snake_case_ ) else '''train''' __lowerCAmelCase : Tuple = features __lowerCAmelCase : Optional[int] = cache_dir __lowerCAmelCase : int = keep_in_memory __lowerCAmelCase : Tuple = streaming __lowerCAmelCase : Optional[int] = num_proc __lowerCAmelCase : str = kwargs @abstractmethod def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' pass class __lowercase (lowerCAmelCase_ ): def __init__( self , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : List[str] = features __lowerCAmelCase : Any = cache_dir __lowerCAmelCase : Optional[Any] = keep_in_memory __lowerCAmelCase : Optional[Any] = streaming __lowerCAmelCase : Union[str, Any] = num_proc __lowerCAmelCase : Union[str, Any] = kwargs @abstractmethod def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' pass
275
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
22
0
"""simple docstring""" import numpy as np class UpperCamelCase : def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> str: '''simple docstring''' self.set_matricies(red=snake_case_ ,green=snake_case_ ,blue=snake_case_ ,red_edge=snake_case_ ,nir=snake_case_ ) def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Optional[Any]: '''simple docstring''' if red is not None: lowercase_ : Any = red if green is not None: lowercase_ : List[Any] = green if blue is not None: lowercase_ : Any = blue if red_edge is not None: lowercase_ : Dict = red_edge if nir is not None: lowercase_ : Tuple = nir return True def _UpperCAmelCase ( self ,__UpperCamelCase="" ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> str: '''simple docstring''' self.set_matricies(red=snake_case_ ,green=snake_case_ ,blue=snake_case_ ,red_edge=snake_case_ ,nir=snake_case_ ) lowercase_ : Any = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!' 
) return False def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return self.nir * (self.red / (self.green**2)) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return (self.nir - self.red) / (self.nir + self.red) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return (self.nir - self.blue) / (self.nir + self.blue) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return (self.redEdge - self.red) / (self.redEdge + self.red) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _UpperCAmelCase ( self ,__UpperCamelCase=0.08 ,__UpperCamelCase=1.22 ,__UpperCamelCase=0.03 ) -> Union[str, Any]: '''simple docstring''' return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return (self.nir / self.green) - 1 def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return (self.nir / self.redEdge) - 1 def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return (self.red - self.blue) / self.red def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[Any] = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return self.nir - self.green def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def _UpperCAmelCase ( self ,__UpperCamelCase=0.16 ) -> Dict: '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green + y) def _UpperCAmelCase ( self ,__UpperCamelCase=0.5 ) -> List[Any]: '''simple docstring''' return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def _UpperCAmelCase ( 
self ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> List[str]: '''simple docstring''' return (self.nir - b) / (a * self.red) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return (self.red + self.green + self.blue) / 30.5 def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return self.nir / self.red def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return (self.rvi() - 1) / (self.rvi() + 1) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.green / (self.nir + self.red + self.green) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return self.nir / (self.nir + self.red + self.green) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return self.red / (self.nir + self.red + self.green) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return (self.green - self.red) / (self.green + self.red) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return (self.red - self.green) / (self.red + self.green) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[str] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowercase_ : str = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return self.nir / self.red def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return (self.ndvi() + 0.5) ** (1 / 2) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return (self.nir - self.redEdge) / (self.nir + self.redEdge)
213
def count_set_bits(number: int) -> int:
    """Return the number of set bits (1s) in the binary representation of number."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Brian Kernighan's trick: clearing the lowest set bit on each pass
        # means the loop runs once per set bit instead of once per bit
        # position, so it won't run 32 times, only the number of `1` times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
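A few spot checks for the routine above; each expected count can be read off the binary expansion in the comment.

assert count_set_bits(0) == 0
assert count_set_bits(0b1011) == 3
assert count_set_bits(255) == 8     # 0b11111111
print(count_set_bits(2023))         # 0b11111100111 -> 9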
22
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A : '''simple docstring''' @staticmethod def a__ (*A , **A ) -> Optional[Any]: """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class __A ( unittest.TestCase ): '''simple docstring''' __lowerCamelCase : List[str] = MODEL_FOR_OBJECT_DETECTION_MAPPING def a__ (self , A , A , A ) -> List[Any]: """simple docstring""" _a = ObjectDetectionPipeline(model=snake_case_ , image_processor=snake_case_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def a__ (self , A , A ) -> Dict: """simple docstring""" _a = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(snake_case_ ) , 0 ) for detected_object in outputs: self.assertEqual( snake_case_ , { '''score''': ANY(snake_case_ ), '''label''': ANY(snake_case_ ), '''box''': {'''xmin''': ANY(snake_case_ ), '''ymin''': ANY(snake_case_ ), '''xmax''': ANY(snake_case_ ), '''ymax''': ANY(snake_case_ )}, } , ) import datasets _a = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) _a = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] _a = object_detector(snake_case_ , threshold=0.0 ) self.assertEqual(len(snake_case_ ) , len(snake_case_ ) ) for outputs in batch_outputs: self.assertGreater(len(snake_case_ ) , 0 ) for detected_object in outputs: self.assertEqual( snake_case_ , { '''score''': ANY(snake_case_ ), '''label''': ANY(snake_case_ ), '''box''': {'''xmin''': ANY(snake_case_ ), '''ymin''': ANY(snake_case_ ), '''xmax''': ANY(snake_case_ ), '''ymax''': ANY(snake_case_ )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def a__ (self ) -> Dict: """simple docstring""" pass @require_torch def a__ (self ) -> Any: """simple docstring""" _a = '''hf-internal-testing/tiny-detr-mobilenetsv3''' _a = AutoModelForObjectDetection.from_pretrained(snake_case_ ) _a = AutoFeatureExtractor.from_pretrained(snake_case_ ) _a = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ ) _a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ] , ) _a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': 
'''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ] , ) @require_torch @slow def a__ (self ) -> Dict: """simple docstring""" _a = '''facebook/detr-resnet-50''' _a = AutoModelForObjectDetection.from_pretrained(snake_case_ ) _a = AutoFeatureExtractor.from_pretrained(snake_case_ ) _a = ObjectDetectionPipeline(model=snake_case_ , feature_extractor=snake_case_ ) _a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) _a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def a__ (self ) -> str: """simple docstring""" _a = '''facebook/detr-resnet-50''' _a = pipeline('''object-detection''' , model=snake_case_ ) _a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, 
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) _a = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = 0.9985 _a = '''facebook/detr-resnet-50''' _a = pipeline('''object-detection''' , model=snake_case_ ) _a = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=snake_case_ ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) @require_torch @require_pytesseract @slow def a__ (self ) -> str: """simple docstring""" _a = '''Narsil/layoutlmv3-finetuned-funsd''' _a = 0.9993 _a = pipeline('''object-detection''' , model=snake_case_ , threshold=snake_case_ ) _a = object_detector( '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(snake_case_ , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, ] , )
211
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N = len(arr)
        # Iterative segment tree: leaves live at indices N..2N-1.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        # Compare every possible [i, j] query against a plain reduce.
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
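A separate usage sketch for the SegmentTree above, with gcd as the combining function; the sample values are invented for illustration.

from math import gcd

values = [12, 18, 24, 36, 48]
gcd_tree = SegmentTree(values, gcd)
print(gcd_tree.query(0, 4))  # gcd of all five values -> 6
print(gcd_tree.query(1, 3))  # gcd(18, 24, 36) -> 6
gcd_tree.update(2, 9)
print(gcd_tree.query(0, 4))  # gcd(12, 18, 9, 36, 48) -> 3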
22
0
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( lowerCAmelCase_ ): '''simple docstring''' def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.sql.Selectable"] , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _SCREAMING_SNAKE_CASE: Optional[Features] = None , _SCREAMING_SNAKE_CASE: str = None , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: int , ) -> Dict: """simple docstring""" super().__init__(features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , **snake_case_) __lowerCAmelCase : int = Sql( cache_dir=snake_case_ , features=snake_case_ , sql=snake_case_ , con=snake_case_ , **snake_case_ , ) def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]: """simple docstring""" __lowerCAmelCase : List[Any] = None __lowerCAmelCase : str = None __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : int = None self.builder.download_and_prepare( download_config=snake_case_ , download_mode=snake_case_ , verification_mode=snake_case_ , base_path=snake_case_ , ) # Build dataset for splits __lowerCAmelCase : Tuple = self.builder.as_dataset( split="train" , verification_mode=snake_case_ , in_memory=self.keep_in_memory) return dataset class A__ : '''simple docstring''' def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: Dataset , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> Tuple: """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""") __lowerCAmelCase : int = dataset __lowerCAmelCase : List[str] = name __lowerCAmelCase : Tuple = con __lowerCAmelCase : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __lowerCAmelCase : str = num_proc __lowerCAmelCase : Optional[int] = to_sql_kwargs def _SCREAMING_SNAKE_CASE ( self: Dict) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[int] = self.to_sql_kwargs.pop("sql" , snake_case_) __lowerCAmelCase : List[Any] = self.to_sql_kwargs.pop("con" , snake_case_) __lowerCAmelCase : Union[str, Any] = self.to_sql_kwargs.pop("index" , snake_case_) __lowerCAmelCase : Optional[Any] = self._write(index=snake_case_ , **self.to_sql_kwargs) return written def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Any) -> Any: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = args __lowerCAmelCase : List[Any] = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs __lowerCAmelCase : str = query_table( table=self.dataset.data , key=slice(snake_case_ , offset + self.batch_size) , indices=self.dataset._indices , ) __lowerCAmelCase : str = batch.to_pandas() __lowerCAmelCase : Optional[Any] = df.to_sql(self.name , self.con , index=snake_case_ , **snake_case_) return num_rows or len(snake_case_) def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Optional[Any]: """simple 
docstring""" __lowerCAmelCase : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs)) else: __lowerCAmelCase , __lowerCAmelCase : List[str] = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , snake_case_ , snake_case_)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
269
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
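A minimal pure-Python sketch that reproduces the shard distribution the tests above expect; distribute_shards_sketch is an invented name and an illustrative reimplementation, not the library's actual _distribute_shards code.

def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    # Spread num_shards contiguous shard indices over at most max_num_jobs jobs;
    # the first num_shards % num_jobs jobs receive one extra shard.
    num_jobs = min(num_shards, max_num_jobs)
    sizes = [num_shards // num_jobs + (i < num_shards % num_jobs) for i in range(num_jobs)]
    starts = [sum(sizes[:i]) for i in range(num_jobs)]
    return [range(s, s + n) for s, n in zip(starts, sizes)]


assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]
assert distribute_shards_sketch(0, 1) == []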
22
0
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term so leading terms become 1.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row to cancel the leading term of every other row.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If the first term is 0, the row is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set.
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # If any row contains a zero, move one equation with no zero coefficients to the front.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) simplified row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row and temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
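A quick sanity check of solve_simultaneous above on a 2x2 system; the roots can be verified by substitution.

# x + y = 3 and x - y = 1 give x = 2, y = 1.
print(solve_simultaneous([[1, 1, 3], [1, -1, 1]]))  # [2.0, 1.0]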
195
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
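A brief demonstration of the binary-search variant above; the large argument is exactly (10**6)**2, so no floating point is involved.

print(perfect_square_binary_search(36))      # True
print(perfect_square_binary_search(35))      # False
print(perfect_square_binary_search(10**12))  # True: (10**6) ** 2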
22
0
"""simple docstring""" a = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} a = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def lowercase (snake_case__ : dict[int, list[int]] , snake_case__ : int , snake_case__ : list[bool] ) -> list[int]: '''simple docstring''' lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(__lowercase , __lowercase , __lowercase ) order.append(__lowercase ) return order def lowercase (snake_case__ : dict[int, list[int]] , snake_case__ : int , snake_case__ : list[bool] ) -> list[int]: '''simple docstring''' lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(__lowercase , __lowercase , __lowercase ) return component def lowercase (snake_case__ : dict[int, list[int]] ) -> list[list[int]]: '''simple docstring''' lowerCAmelCase = len(__lowercase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(__lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(__lowercase ) lowerCAmelCase = [] for i, was_visited in enumerate(__lowercase ): if not was_visited: order += topology_sort(__lowercase , __lowercase , __lowercase ) lowerCAmelCase = [] lowerCAmelCase = len(__lowercase ) * [False] for i in range(len(__lowercase ) ): lowerCAmelCase = order[len(__lowercase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(__lowercase , __lowercase , __lowercase ) components_list.append(__lowercase ) return components_list
155
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __SCREAMING_SNAKE_CASE :Dict = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ): _UpperCAmelCase = d_model _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = prediction_length _UpperCAmelCase = context_length _UpperCAmelCase = cardinality _UpperCAmelCase = num_time_features _UpperCAmelCase = lags_sequence _UpperCAmelCase = embedding_dimension _UpperCAmelCase = is_training _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = context_length _UpperCAmelCase = prediction_length + label_length _UpperCAmelCase = label_length _UpperCAmelCase = moving_average _UpperCAmelCase = autocorrelation_factor def lowercase ( self : Union[str, Any] ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase ( self : int , snake_case_ : Optional[Any] ): _UpperCAmelCase = config.context_length + max(config.lags_sequence ) _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] ) _UpperCAmelCase = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def lowercase ( self : List[Any] ): _UpperCAmelCase = self.get_config() _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ ) return config, inputs_dict def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ): _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval() _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = outputs.encoder_last_hidden_state _UpperCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_encoder() encoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _UpperCAmelCase = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _UpperCAmelCase = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _UpperCAmelCase = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _UpperCAmelCase = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _UpperCAmelCase = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = model.get_decoder() decoder.save_pretrained(snake_case_ ) _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = decoder( trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else () _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Tuple = False _lowerCamelCase : int = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[Any] = False def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, 
Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ ) self.assertEqual(info["missing_keys"] , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ ) @unittest.skip(reason="Model has no tokens embeddings" ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : Optional[int] ): _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) ) # The main input is the name of the argument after `self` _UpperCAmelCase = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , snake_case_ ) def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ ) _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ ) _UpperCAmelCase = d_model // num_attention_heads for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , 
[self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(snake_case_ , snake_case_ ) # decoder attentions _UpperCAmelCase = outputs.decoder_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _UpperCAmelCase = outputs.cross_attentions self.assertIsInstance(snake_case_ , (list, tuple) ) self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) self.assertEqual(out_len + 2 , len(snake_case_ ) ) _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase ( self : Dict ): super().test_retain_grad_hidden_states_attentions() def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]: '''simple docstring''' _UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" ) _UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase ) return batch @require_torch @slow class A_ ( unittest.TestCase ): def lowercase ( self : Optional[int] ): _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch() with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] _UpperCAmelCase = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , 
static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ ) self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ ) _UpperCAmelCase = prepare_batch("val-batch.pt" ) with torch.no_grad(): _UpperCAmelCase = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ ) _UpperCAmelCase = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
22
0
"""simple docstring""" _UpperCamelCase : Union[str, Any] = range(2, 2_0 + 1) _UpperCamelCase : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)] _UpperCamelCase : dict[int, dict[int, list[list[int]]]] = {} def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : int , __snake_case : Optional[int] , __snake_case : Union[str, Any] ): '''simple docstring''' lowercase = sum(a_i[j] for j in range(__lowercase , len(__lowercase ) ) ) lowercase = sum(a_i[j] * base[j] for j in range(min(len(__lowercase ) , __lowercase ) ) ) lowercase , lowercase = 0, 0 lowercase = n - i lowercase = memo.get(__lowercase ) if sub_memo is not None: lowercase = sub_memo.get(__lowercase ) if jumps is not None and len(__lowercase ) > 0: # find and make the largest jump without going over lowercase = -1 for _k in range(len(__lowercase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: lowercase = _k break if max_jump >= 0: lowercase , lowercase , lowercase = jumps[max_jump] # since the difference between jumps is cached, add c lowercase = diff + c for j in range(min(__lowercase , len(__lowercase ) ) ): lowercase , lowercase = divmod(__lowercase , 10 ) if new_c > 0: add(__lowercase , __lowercase , __lowercase ) else: lowercase = [] else: lowercase = {c: []} lowercase = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps lowercase , lowercase = next_term(__lowercase , k - 1 , i + dn , __lowercase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead lowercase , lowercase = compute(__lowercase , __lowercase , i + dn , __lowercase ) diff += _diff dn += terms_jumped lowercase = sub_memo[c] # keep jumps sorted by # of terms skipped lowercase = 0 while j < len(__lowercase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(__lowercase , (diff, dn, k) ) return (diff, dn) def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Union[str, Any] ): '''simple docstring''' if i >= n: return 0, i if k > len(__lowercase ): a_i.extend([0 for _ in range(k - len(__lowercase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) lowercase = i lowercase , lowercase , lowercase = 0, 0, 0 for j in range(len(__lowercase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 lowercase = ds_c + ds_b diff += addend lowercase = 0 for j in range(__lowercase ): lowercase = a_i[j] + addend lowercase , lowercase = divmod(__lowercase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(__lowercase , __lowercase , __lowercase ) return diff, i - start_i def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : int ): '''simple docstring''' for j in range(__lowercase , len(__lowercase ) ): lowercase = digits[j] + addend if s >= 10: lowercase , lowercase = divmod(__lowercase , 10 ) lowercase = addend // 10 + quotient else: lowercase = s lowercase = addend // 10 if addend == 0: break while addend > 0: lowercase , lowercase = divmod(__lowercase , 10 ) digits.append(__lowercase ) def _SCREAMING_SNAKE_CASE ( __snake_case : int = 10**15 ): '''simple docstring''' lowercase = [1] lowercase = 1 lowercase = 0 while True: lowercase , lowercase = next_term(__lowercase , 20 , i + dn , __lowercase ) dn += terms_jumped if dn == n - i: break lowercase = 0 for j in range(len(__lowercase ) 
): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
220
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__) class A_ : _lowerCamelCase : str _lowerCamelCase : str = None @staticmethod def lowercase ( ): raise NotImplementedError def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ): raise NotImplementedError def lowercase ( self : Any , snake_case_ : int ): raise NotImplementedError def lowercase ( self : List[str] ): if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase ( cls : List[Any] ): return f'`pip install {cls.pip_package or cls.name}`' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """optuna""" @staticmethod def lowercase ( ): return is_optuna_available() def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ): return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : int , snake_case_ : Optional[int] ): return default_hp_space_optuna(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Any = """ray""" _lowerCamelCase : Tuple = """'ray[tune]'""" @staticmethod def lowercase ( ): return is_ray_available() def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ): return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : str ): return default_hp_space_ray(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """sigopt""" @staticmethod def lowercase ( ): return is_sigopt_available() def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ): return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Dict , snake_case_ : Optional[Any] ): return default_hp_space_sigopt(snake_case_ ) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """wandb""" @staticmethod def lowercase ( ): return is_wandb_available() def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ): return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) def lowercase ( self : Any , snake_case_ : Union[str, Any] ): return default_hp_space_wandb(snake_case_ ) __SCREAMING_SNAKE_CASE :Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase_ ( ) -> str: '''simple docstring''' _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowercase ) > 0: _UpperCAmelCase = available_backends[0].name if len(__lowercase ) > 1: logger.info( f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
22
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
141
'''simple docstring''' __SCREAMING_SNAKE_CASE :List[str] = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, 
KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
22
0
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Prints the first-order (single character) entropy of the text, the
    second-order (character pair) entropy, and the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
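# A quick check of analyze_text as reconstructed above (counts hand-verified,
# not taken from the source): the final character gets one extra count and a
# leading-space bigram is recorded for the first character.
if __name__ == "__main__":
    singles, pairs = analyze_text("abca")
    assert singles["a"] == 2 and pairs[" a"] == 1 and pairs["ab"] == 1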
124
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits the text into sentences, one per line, after stripping the
    Pegasus "<n>" newline marker."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (keep the result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
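# Usage sketch (kept as a comment since importing this module downloads the
# punkt model; the exact split depends on nltk, so the output is illustrative):
# >>> add_newline_to_end_of_each_sentence("First sentence.<n> Second sentence.")
# 'First sentence.\nSecond sentence.'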
22
0
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A_ (lowerCAmelCase_ , unittest.TestCase ): UpperCAmelCase__ = DDIMPipeline UpperCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } UpperCAmelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS UpperCAmelCase__ = False def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) UpperCAmelCase = DDIMScheduler() UpperCAmelCase = {'''unet''': unet, '''scheduler''': scheduler} return components def _lowercase ( self , _A , _A=0 ): '''simple docstring''' if str(snake_case_ ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(snake_case_ ) else: UpperCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = '''cpu''' UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) UpperCAmelCase = self.get_dummy_inputs(snake_case_ ) UpperCAmelCase = pipe(**snake_case_ ).images UpperCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 3_2, 3_2, 3) ) UpperCAmelCase = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case_ , 1E-3 ) def _lowercase ( self ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _lowercase ( self ): '''simple docstring''' super().test_save_load_local(expected_max_difference=3E-3 ) def _lowercase ( self ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _lowercase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = '''google/ddpm-cifar10-32''' UpperCAmelCase = UNetaDModel.from_pretrained(snake_case_ ) UpperCAmelCase = DDIMScheduler() UpperCAmelCase = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ ) ddim.to(snake_case_ ) ddim.set_progress_bar_config(disable=snake_case_ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = ddim(generator=snake_case_ , eta=0.0 , output_type='''numpy''' ).images UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase ( self ): 
'''simple docstring''' UpperCAmelCase = '''google/ddpm-ema-bedroom-256''' UpperCAmelCase = UNetaDModel.from_pretrained(snake_case_ ) UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ ) UpperCAmelCase = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ ) ddpm.to(snake_case_ ) ddpm.set_progress_bar_config(disable=snake_case_ ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = ddpm(generator=snake_case_ , output_type='''numpy''' ).images UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) UpperCAmelCase = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
273
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A_ : def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : List[Any] ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) 
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) comm_check_on_output(snake_case_ ) _UpperCAmelCase = model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowerCamelCase : Tuple = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = False def lowercase ( self : Optional[int] ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def lowercase ( self : Any ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def lowercase ( self : List[str] ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def lowercase ( self : 
List[str] ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def lowercase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowercase ( self : Any ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def lowercase ( self : Optional[int] ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ), "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ), "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def lowercase ( self : int ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE :Dict = 1e-4 def UpperCAmelCase_ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class A_ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def lowercase ( self : List[Any] ): _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( 
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : int ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : List[Any] ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , ) _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ ) _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]] _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]] with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
22
0
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files" ,[ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ] ,) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : int ): """simple docstring""" A_ = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md" ,"w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md" ,"w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json" ,"w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) A_ = DatasetInfosDict.from_directory(__lowercase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info" ,[ DatasetInfo(), DatasetInfo( description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,), ] ,) def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : DatasetInfo ): """simple docstring""" A_ = str(__lowercase ) dataset_info.write_to_directory(__lowercase ) A_ = DatasetInfo.from_directory(__lowercase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowercase ,"dataset_info.json" ) ) def __snake_case ( ): """simple docstring""" A_ = DatasetInfo( description="foo" ,citation="bar" ,homepage="https://foo.bar" ,license="CC0" ,features=Features({"a": Value("int32" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train", "num_examples": 42}] ,download_checksums={} ,download_size=1337 ,post_processing_size=442 ,dataset_size=1234 ,size_in_bytes=1337 + 442 + 1234 ,) A_ = dataset_info._to_yaml_dict() assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) ) A_ = yaml.safe_dump(__lowercase ) A_ = yaml.safe_load(__lowercase ) assert dataset_info_yaml_dict == reloaded def __snake_case ( ): """simple docstring""" A_ = DatasetInfo() A_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict" ,[ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1337 ), } ), ] ,) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : DatasetInfosDict ): """simple docstring""" A_ = str(__lowercase ) dataset_infos_dict.write_to_directory(__lowercase ) A_ = DatasetInfosDict.from_directory(__lowercase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): A_ = config_name # the yaml representation doesn't include fields like description or citation # so 
we just test that we can recover what we can from the yaml A_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowercase ,"README.md" ) )
312
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __SCREAMING_SNAKE_CASE :List[Any] = None __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __SCREAMING_SNAKE_CASE :List[Any] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } __SCREAMING_SNAKE_CASE :Optional[Any] = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } __SCREAMING_SNAKE_CASE :Optional[int] = '''▁''' class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = AlbertTokenizer def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Optional[Any]=True , snake_case_ : str=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="[CLS]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : str="<unk>" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : List[Any]="<pad>" , snake_case_ : List[str]="[CLS]" , snake_case_ : int="[MASK]" , **snake_case_ : Any , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_UpperCAmelCase = ( AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token ) super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , ) _UpperCAmelCase = do_lower_case _UpperCAmelCase = remove_space _UpperCAmelCase = keep_accents _UpperCAmelCase = vocab_file _UpperCAmelCase = False if not self.vocab_file else True def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Dict , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ): copyfile(self.vocab_file , snake_case_ ) return (out_vocab_file,)
22
0
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder _UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name _UpperCamelCase = 256 class __lowercase (lowerCAmelCase_ ): _UpperCamelCase = ["""melgan"""] def __init__( self , A_ , A_ , A_ , A_ , A_ , ) ->Optional[Any]: '''simple docstring''' super().__init__() # From MELGAN __lowerCAmelCase : Optional[int] = math.log(1e-5 ) # Matches MelGAN training. __lowerCAmelCase : Any = 4.0 # Largest value for most examples __lowerCAmelCase : str = 128 self.register_modules( notes_encoder=snake_case_ , continuous_encoder=snake_case_ , decoder=snake_case_ , scheduler=snake_case_ , melgan=snake_case_ , ) def UpperCamelCase__ ( self , A_ , A_=(-1.0, 1.0) , A_=False ) ->Dict: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = output_range if clip: __lowerCAmelCase : Any = torch.clip(snake_case_ , self.min_value , self.max_value ) # Scale to [0, 1]. __lowerCAmelCase : List[Any] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def UpperCamelCase__ ( self , A_ , A_=(-1.0, 1.0) , A_=False ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : List[Any] = input_range __lowerCAmelCase : str = torch.clip(snake_case_ , snake_case_ , snake_case_ ) if clip else outputs # Scale to [0, 1]. __lowerCAmelCase : Tuple = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = input_tokens > 0 __lowerCAmelCase, __lowerCAmelCase : List[str] = self.notes_encoder( encoder_input_tokens=snake_case_ , encoder_inputs_mask=snake_case_ ) __lowerCAmelCase, __lowerCAmelCase : List[str] = self.continuous_encoder( encoder_inputs=snake_case_ , encoder_inputs_mask=snake_case_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any: '''simple docstring''' __lowerCAmelCase : List[str] = noise_time if not torch.is_tensor(snake_case_ ): __lowerCAmelCase : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0: __lowerCAmelCase : int = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCAmelCase : Union[str, Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) __lowerCAmelCase : Union[str, Any] = self.decoder( encodings_and_masks=snake_case_ , decoder_input_tokens=snake_case_ , decoder_noise_time=snake_case_ ) return logits @torch.no_grad() def __call__( self , A_ , A_ = None , A_ = 100 , A_ = True , A_ = "numpy" , A_ = None , A_ = 1 , ) ->Tuple: '''simple docstring''' if (callback_steps is None) or ( callback_steps is not None and (not isinstance(snake_case_ , snake_case_ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(snake_case_ )}.""" ) __lowerCAmelCase : Tuple = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) __lowerCAmelCase : List[str] = np.zeros([1, 0, self.n_dims] , np.floataa ) __lowerCAmelCase : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=snake_case_ , device=self.device ) for i, encoder_input_tokens in enumerate(snake_case_ ): if i == 0: __lowerCAmelCase : str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. __lowerCAmelCase : Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=snake_case_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
__lowerCAmelCase : Optional[int] = ones __lowerCAmelCase : List[Any] = self.scale_features( snake_case_ , output_range=[-1.0, 1.0] , clip=snake_case_ ) __lowerCAmelCase : List[str] = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=snake_case_ , continuous_mask=snake_case_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop __lowerCAmelCase : str = randn_tensor( shape=encoder_continuous_inputs.shape , generator=snake_case_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(snake_case_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __lowerCAmelCase : List[str] = self.decode( encodings_and_masks=snake_case_ , input_tokens=snake_case_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 __lowerCAmelCase : Optional[int] = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample __lowerCAmelCase : str = self.scale_to_features(snake_case_ , input_range=[-1.0, 1.0] ) __lowerCAmelCase : Union[str, Any] = mel[:1] __lowerCAmelCase : Optional[Any] = mel.cpu().float().numpy() __lowerCAmelCase : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(snake_case_ , snake_case_ ) logger.info('''Generated segment''' , snake_case_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": __lowerCAmelCase : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: __lowerCAmelCase : Optional[int] = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=snake_case_ )
275
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :int = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = """perceiver""" def __init__( self : Any , snake_case_ : List[Any]=2_5_6 , snake_case_ : str=1_2_8_0 , snake_case_ : Optional[int]=7_6_8 , snake_case_ : int=1 , snake_case_ : List[Any]=2_6 , snake_case_ : Dict=8 , snake_case_ : List[Any]=8 , snake_case_ : Tuple=None , snake_case_ : Tuple=None , snake_case_ : Any="kv" , snake_case_ : Any=1 , snake_case_ : List[str]=1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=0.1 , snake_case_ : Dict=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : List[str]=True , snake_case_ : str=2_6_2 , snake_case_ : Optional[Any]=2_0_4_8 , snake_case_ : Union[str, Any]=5_6 , snake_case_ : Dict=[3_6_8, 4_9_6] , snake_case_ : Tuple=1_6 , snake_case_ : Union[str, Any]=1_9_2_0 , snake_case_ : List[Any]=1_6 , snake_case_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) _UpperCAmelCase = num_latents _UpperCAmelCase = d_latents _UpperCAmelCase = d_model _UpperCAmelCase = num_blocks _UpperCAmelCase = num_self_attends_per_block _UpperCAmelCase = num_self_attention_heads _UpperCAmelCase = num_cross_attention_heads _UpperCAmelCase = qk_channels _UpperCAmelCase = v_channels _UpperCAmelCase = cross_attention_shape_for_attention _UpperCAmelCase = self_attention_widening_factor _UpperCAmelCase = cross_attention_widening_factor _UpperCAmelCase = hidden_act _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_query_residual # masked language modeling attributes _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings # image classification attributes _UpperCAmelCase = image_size # flow attributes _UpperCAmelCase = train_size # multimodal autoencoding attributes _UpperCAmelCase = num_frames _UpperCAmelCase = audio_samples_per_frame _UpperCAmelCase = samples_per_patch _UpperCAmelCase = output_shape class A_ ( lowerCAmelCase_ ): @property def lowercase ( self : int ): if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def lowercase ( self : Optional[Any] ): return 1e-4 def lowercase ( self : List[str] , snake_case_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , snake_case_ : int = 3 , snake_case_ : int = 4_0 , snake_case_ : int = 4_0 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(snake_case_ , snake_case_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples 
to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = preprocessor.num_special_tokens_to_add(snake_case_ ) _UpperCAmelCase = compute_effective_axis_dimension( snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [" ".join(["a"] ) * seq_length] * batch_size _UpperCAmelCase = dict(preprocessor(snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("input_ids" ) return inputs elif isinstance(snake_case_ , snake_case_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension(snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch ) _UpperCAmelCase = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _UpperCAmelCase = dict(preprocessor(images=snake_case_ , return_tensors=snake_case_ ) ) _UpperCAmelCase = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
22
0
"""simple docstring""" from torch import nn class UpperCamelCase ( nn.Module ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' super().__init__() lowercase_ : List[str] = class_size lowercase_ : Tuple = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowercase_ : List[str] = nn.Linear(snake_case_ ,snake_case_ ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.mlp(snake_case_ ) return logits
213
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
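# Usage sketch, commented out because this module only imports inside its
# parent package; the 0.75 accuracy is hand-checked, not from the source.
# import numpy as np
# preds = np.array([1, 0, 1, 1])
# labels = np.array([1, 0, 0, 1])
# glue_compute_metrics("sst-2", preds, labels)  # -> {"acc": 0.75}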
22
0
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing array via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with binary search, shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking early once the first negative is found."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against the large generated grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
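As a quick check, the 4 x 4 grid from test_grids contains eight negatives, and all three implementations agree on it:

small_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
validate_grid(small_grid)  # rows and columns are sorted in decreasing order
assert count_negatives_binary_search(small_grid) == 8
assert count_negatives_brute_force(small_grid) == 8
assert count_negatives_brute_force_with_break(small_grid) == 8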
211
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model. If converting a checkpoint that uses absolute position
    # embeddings, make sure reset_position_index_per_cell of TapasConfig is set to False.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
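A hypothetical invocation of the script above (the filename and all paths are placeholders). Note that the tokenizer step slices the last 10 characters off --tf_checkpoint_path, so the checkpoint is expected to be named model.ckpt with vocab.txt beside it.

python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    --task WTQ \
    --reset_position_index_per_cell \
    --tf_checkpoint_path /path/to/model.ckpt \
    --tapas_config_file /path/to/tapas_config.json \
    --pytorch_dump_path /path/to/pytorch_model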
22
0